| repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, nullable) |
|---|---|---|---|---|
andela-ifageyinbo/django
|
refs/heads/master
|
tests/reverse_lookup/tests.py
|
326
|
from __future__ import unicode_literals
from django.core.exceptions import FieldError
from django.test import TestCase
from .models import Choice, Poll, User
class ReverseLookupTests(TestCase):
def setUp(self):
john = User.objects.create(name="John Doe")
jim = User.objects.create(name="Jim Bo")
first_poll = Poll.objects.create(
question="What's the first question?",
creator=john
)
second_poll = Poll.objects.create(
question="What's the second question?",
creator=jim
)
Choice.objects.create(
poll=first_poll,
related_poll=second_poll,
name="This is the answer."
)
def test_reverse_by_field(self):
u1 = User.objects.get(
poll__question__exact="What's the first question?"
)
self.assertEqual(u1.name, "John Doe")
u2 = User.objects.get(
poll__question__exact="What's the second question?"
)
self.assertEqual(u2.name, "Jim Bo")
def test_reverse_by_related_name(self):
p1 = Poll.objects.get(poll_choice__name__exact="This is the answer.")
self.assertEqual(p1.question, "What's the first question?")
p2 = Poll.objects.get(
related_choice__name__exact="This is the answer.")
self.assertEqual(p2.question, "What's the second question?")
def test_reverse_field_name_disallowed(self):
"""
If a related_name is given you can't use the field name instead
"""
self.assertRaises(FieldError, Poll.objects.get,
choice__name__exact="This is the answer")
|
cdrttn/samba-regedit
|
refs/heads/regedit-wip
|
buildtools/wafsamba/tests/__init__.py
|
47
|
# Copyright (C) 2012 Jelmer Vernooij <jelmer@samba.org>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Tests for wafsamba."""
from unittest import (
TestCase,
TestLoader,
)
def test_suite():
names = [
'abi',
'bundled',
'utils',
]
module_names = ['wafsamba.tests.test_' + name for name in names]
loader = TestLoader()
result = loader.suiteClass()
suite = loader.loadTestsFromNames(module_names)
result.addTests(suite)
return result
|
zscproject/OWASP-ZSC
|
refs/heads/master
|
lib/encoder/ruby/base64.py
|
4
|
#!/usr/bin/env python
'''
OWASP ZSC
https://www.owasp.org/index.php/OWASP_ZSC_Tool_Project
https://github.com/zscproject/OWASP-ZSC
http://api.z3r0d4y.com/
https://groups.google.com/d/forum/owasp-zsc [ owasp-zsc[at]googlegroups[dot]com ]
'''
import binascii
import random
import string
from core.compatible import version
_version = version()
def encode(f):
val_name = ''.join(
random.choice(string.ascii_lowercase + string.ascii_uppercase)
for i in range(50))
data = ''
if _version == 2:
data = val_name + ' = "' + str(binascii.b2a_base64(f)).replace(
'\n', '') + '";'
if _version == 3:
data = val_name + ' = "' + str(binascii.b2a_base64(f.encode(
'latin-1')).decode('latin-1').replace('\n', '')) + '"'
var_data = random.choice(string.ascii_lowercase) + ''.join(
random.choice(string.ascii_lowercase + string.ascii_uppercase)
for i in range(50))
func_name = ''.join(
random.choice(string.ascii_lowercase + string.ascii_uppercase)
for i in range(50))
func_argv = random.choice(string.ascii_lowercase) + ''.join(
random.choice(string.ascii_lowercase + string.ascii_uppercase)
for i in range(50))
var_str = random.choice(string.ascii_lowercase) + ''.join(
random.choice(string.ascii_lowercase + string.ascii_uppercase)
for i in range(50))
f = '''
require "base64"
%s
def %s(%s)
%s = Base64.decode64(%s)
return %s
end
%s = %s;
eval(%s(%s));''' % (data, func_name, func_argv, var_str, func_argv, var_str,
var_data, val_name, func_name, var_data)
return f
def start(content,cli):
return str(str('=begin\n') + str(content.replace(
'=begin', '#=begin').replace('=end', '#=end')) + str('\n=end') + str(
encode(content)) + str('\n'))
|
ITCoders/Human-detection-and-Tracking
|
refs/heads/master
|
scripts/face_rec_demo.py
|
2
|
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Philipp Wagner <bytefish[at]gmx[dot]de>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the author nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import cv2
import numpy as np
def normalize(X, low, high, dtype=None):
"""Normalizes a given array in X to a value between low and high."""
X = np.asarray(X)
minX, maxX = np.min(X), np.max(X)
# normalize to [0...1].
X -= float(minX)
X /= float((maxX - minX))
# scale to [low...high].
X = X * (high - low)
X = X + low
if dtype is None:
return np.asarray(X)
return np.asarray(X, dtype=dtype)
def read_images(path, sz=None):
"""Reads the images in a given folder, resizes images on the fly if size is given.
Args:
path: Path to a folder with subfolders representing the subjects (persons).
sz: A tuple with the size to which the images are resized (optional).
Returns:
A list [X,y]
X: The images, which is a Python list of numpy arrays.
y: The corresponding labels (the unique number of the subject, person) in a Python list.
"""
c = 0
X, y = [], []
for dirname, dirnames, filenames in os.walk(path):
for subdirname in dirnames:
subject_path = os.path.join(dirname, subdirname)
for filename in os.listdir(subject_path):
try:
im = cv2.imread(os.path.join(
subject_path, filename), cv2.IMREAD_GRAYSCALE)
# resize to given size (if given)
if sz is not None:
im = cv2.resize(im, sz)
X.append(np.asarray(im, dtype=np.uint8))
y.append(c)
except IOError, (errno, strerror):
print "I/O error({0}): {1}".format(errno, strerror)
except:
print "Unexpected error:", sys.exc_info()[0]
raise
c = c + 1
return [X, y]
if __name__ == "__main__":
# This is where we write the images, if an output_dir is given
# in command line:
out_dir = None
# You'll need at least a path to your image data, please see
# the tutorial coming with this source code on how to prepare
# your image data:
if len(sys.argv) < 2:
print "USAGE: facerec_demo.py </path/to/images> [</path/to/store/images/at>]"
sys.exit()
# Now read in the image data. This must be a valid path!
[X, y] = read_images(sys.argv[1])
# Convert labels to 32bit integers. This is a workaround for 64bit machines,
# because the labels would be truncated otherwise. This will be fixed in code as
# soon as possible, so Python users don't need to know about this.
# Thanks to Leo Dirac for reporting:
y = np.asarray(y, dtype=np.int32)
# If a out_dir is given, set it:
if len(sys.argv) == 3:
out_dir = sys.argv[2]
# Create the Eigenfaces model. We are going to use the default
# parameters for this simple example, please read the documentation
# for thresholding:
model = cv2.createEigenFaceRecognizer()
# Learn the model. Remember our function returns Python lists,
# so we use np.asarray to turn them into NumPy arrays to make
# the OpenCV wrapper happy:
model.train(np.asarray(X), np.asarray(y))
# We now get a prediction from the model! In reality you
# should always use unseen images for testing your model.
# But so many people were confused when I sliced an image
# off in the C++ version, so here we simply reuse an image we
# trained with.
#
# model.predict is going to return the predicted label and
# the associated confidence:
[p_label, p_confidence] = model.predict(np.asarray(X[0]))
# Print it:
print "Predicted label = %d (confidence=%.2f)" % (p_label, p_confidence)
# Cool! Finally we'll plot the Eigenfaces, because that's
# what most people who read the papers are keen to see.
#
# Just like in C++ you have access to all model internal
# data, because the cv::FaceRecognizer is a cv::Algorithm.
#
# You can see the available parameters with getParams():
print model.getParams()
# Now let's get some data:
mean = model.getMat("mean")
eigenvectors = model.getMat("eigenvectors")
# We'll save the mean, by first normalizing it:
mean_norm = normalize(mean, 0, 255, dtype=np.uint8)
mean_resized = mean_norm.reshape(X[0].shape)
if out_dir is None:
cv2.imshow("mean", mean_resized)
else:
cv2.imwrite("%s/mean.png" % (out_dir), mean_resized)
# Turn the first (at most) 16 eigenvectors into grayscale
# images. You could also use cv::normalize here, but sticking
# to NumPy is much easier for now.
# Note: eigenvectors are stored by column:
for i in xrange(min(len(X), 16)):
eigenvector_i = eigenvectors[:, i].reshape(X[0].shape)
eigenvector_i_norm = normalize(eigenvector_i, 0, 255, dtype=np.uint8)
# Show or save the images:
if out_dir is None:
cv2.imshow("%s/eigenface_%d" % (out_dir, i), eigenvector_i_norm)
else:
cv2.imwrite("%s/eigenface_%d.png" %
(out_dir, i), eigenvector_i_norm)
# Show the images:
if out_dir is None:
cv2.waitKey(0)
|
hroncok/devassistant
|
refs/heads/pathstests
|
features/environment.py
|
8
|
import os
import signal
datadir = os.path.join(os.path.dirname(__file__), 'data')
cache = os.path.join(datadir, '.cache.yaml')
config = os.path.join(datadir, '.config')
lastrun = os.path.join(datadir, 'lastrun.log')
homedir_files = [cache, config, lastrun]
def before_all(context):
context.dagui_scriptname = 'da-gui.py'
context.dagui_scriptpath = os.path.abspath(context.dagui_scriptname)
os.environ['DEVASSISTANT_NO_DEFAULT_PATH'] = '1'
os.environ['DEVASSISTANT_PATH'] = datadir
os.environ['DEVASSISTANT_HOME'] = datadir
def after_scenario(context, scenario):
for f in homedir_files:
if os.path.exists(f):
os.remove(f)
os.kill(context.dagui_pid, signal.SIGKILL)
|
1stvamp/kombu
|
refs/heads/master
|
funtests/tests/test_redis.py
|
30
|
from nose import SkipTest
from funtests import transport
class test_redis(transport.TransportCase):
transport = 'redis'
prefix = 'redis'
def before_connect(self):
try:
import redis # noqa
except ImportError:
raise SkipTest('redis not installed')
def after_connect(self, connection):
client = connection.channel().client
client.info()
def test_cant_connect_raises_connection_error(self):
conn = self.get_connection(port=65534)
self.assertRaises(conn.connection_errors, conn.connect)
|
qedi-r/home-assistant
|
refs/heads/dev
|
homeassistant/components/w800rf32/binary_sensor.py
|
2
|
"""Support for w800rf32 binary sensors."""
import logging
import voluptuous as vol
import W800rf32 as w800
from homeassistant.components.binary_sensor import (
DEVICE_CLASSES_SCHEMA,
PLATFORM_SCHEMA,
BinarySensorDevice,
)
from homeassistant.const import CONF_DEVICE_CLASS, CONF_DEVICES, CONF_NAME
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv, event as evt
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.util import dt as dt_util
from . import W800RF32_DEVICE
_LOGGER = logging.getLogger(__name__)
CONF_OFF_DELAY = "off_delay"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_DEVICES): {
cv.string: vol.Schema(
{
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
vol.Optional(CONF_OFF_DELAY): vol.All(
cv.time_period, cv.positive_timedelta
),
}
)
}
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Binary Sensor platform to w800rf32."""
binary_sensors = []
# device_id --> "c1 or a3" X10 device. entity (type dictionary)
# --> name, device_class etc
for device_id, entity in config[CONF_DEVICES].items():
_LOGGER.debug(
"Add %s w800rf32.binary_sensor (class %s)",
entity[CONF_NAME],
entity.get(CONF_DEVICE_CLASS),
)
device = W800rf32BinarySensor(
device_id,
entity.get(CONF_NAME),
entity.get(CONF_DEVICE_CLASS),
entity.get(CONF_OFF_DELAY),
)
binary_sensors.append(device)
add_entities(binary_sensors)
class W800rf32BinarySensor(BinarySensorDevice):
"""A representation of a w800rf32 binary sensor."""
def __init__(self, device_id, name, device_class=None, off_delay=None):
"""Initialize the w800rf32 sensor."""
self._signal = W800RF32_DEVICE.format(device_id)
self._name = name
self._device_class = device_class
self._off_delay = off_delay
self._state = False
self._delay_listener = None
@callback
def _off_delay_listener(self, now):
"""Switch device off after a delay."""
self._delay_listener = None
self.update_state(False)
@property
def name(self):
"""Return the device name."""
return self._name
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def device_class(self):
"""Return the sensor class."""
return self._device_class
@property
def is_on(self):
"""Return true if the sensor state is True."""
return self._state
@callback
def binary_sensor_update(self, event):
"""Call for control updates from the w800rf32 gateway."""
if not isinstance(event, w800.W800rf32Event):
return
dev_id = event.device
command = event.command
_LOGGER.debug(
"BinarySensor update (Device ID: %s Command %s ...)", dev_id, command
)
# Update the w800rf32 device state
if command in ("On", "Off"):
is_on = command == "On"
self.update_state(is_on)
if self.is_on and self._off_delay is not None and self._delay_listener is None:
self._delay_listener = evt.async_track_point_in_time(
self.hass, self._off_delay_listener, dt_util.utcnow() + self._off_delay
)
def update_state(self, state):
"""Update the state of the device."""
self._state = state
self.async_schedule_update_ha_state()
async def async_added_to_hass(self):
"""Register update callback."""
async_dispatcher_connect(self.hass, self._signal, self.binary_sensor_update)
|
BruceDai/web-testing-service
|
refs/heads/master
|
wts/tests/xmlhttprequest/w3c/resources/accept-language.py
|
250
|
def main(request, response):
return [("Content-Type", "text/plain"),
request.headers.get("Accept-Language", "NO")]
|
TsarN/pysistem
|
refs/heads/master
|
pysistem/contests/decorators.py
|
1
|
# -*- coding: utf-8 -*-
"""Contest decorators"""
from functools import wraps
from flask import render_template
from pysistem.contests.model import Contest
def yield_contest(field='contest_id', yield_field='contest'):
"""Decorator
Get contest identified by 'field' keyword argument
and save it to 'yield_field' keyword argument.
If the contest does not exist, return a 404 Not Found error
"""
def decorator(func):
"""Decorator of yield_contest"""
@wraps(func)
def decorated_function(*args, **kwargs):
"""Decorated of yield_contest"""
contest = Contest.query.get(int(kwargs.get(field)))
if contest is None:
return render_template('errors/404.html'), 404
kwargs[yield_field] = contest
return func(*args, **kwargs)
return decorated_function
return decorator
|
oxnz/NZChat
|
refs/heads/master
|
NZChat/NZChat.py
|
1
|
#-*- coding: utf-8 -*-
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4 import QtNetwork
from NZContentPane import NZContentPane
from NZConfig import NZConfig
from NZMessage import NZMessage
from NZMessage import NZMsgType
from NZStatus import NZStatus
from NZSettings import NZSettings
from NZSockets import NZSockets
class NZAvatar(QLabel):
'''display default avatar for newbie'''
def __init__(self, parent=None):
super(NZAvatar, self).__init__(parent)
avatar = QPixmap('./avatar.png')
avatar = avatar.scaled(64, 64)
self.setPixmap(avatar)
class NZStatusBox(QComboBox):
def __init__(self, config, status = NZStatus.ONLINE, parent=None):
super(NZStatusBox, self).__init__(parent)
self.config = config
self.currentIndexChanged['QString'].connect(self.changeStatus)
self.stats = dict({
NZStatus.ONLINE : self.tr('在线'),
NZStatus.OFFLINE : self.tr('离线'),
# NZStatus.OFFLINE : self.tr('隐身'),
})
for s in self.stats:
self.addItem(self.stats[s])
def changeStatus(self, status):
'''handle user status change event, if needed, broadcast
to fellows
'''
if status == self.stats[NZStatus.ONLINE]:
print 'send online notify'
msg = NZMessage(mtype=NZMsgType.ONLINE).encode()
self.config.monitorSocket.writeDatagram(msg, QtNetwork.QHostAddress.Broadcast, self.config.monitorPort)
del msg
elif status == self.stats[NZStatus.OFFLINE]:
print 'one person go offline'
msg = NZMessage(mtype=NZMsgType.OFFLINE).encode()
self.config.monitorSocket.writeDatagram(msg, QtNetwork.QHostAddress.Broadcast, self.config.monitorPort)
del msg
else:
print 'unknown option'
class NZHeaderPane(QFrame):
'''header frame of the main pane
contains user's avatar and status combo box and nickname
'''
def __init__(self, config, settings, nickname, parent=None):
super(NZHeaderPane, self).__init__(parent)
grid = QGridLayout(self)
self.setLayout(grid)
grid.addWidget(NZAvatar(self), 0, 0, 2, 2)
grid.addWidget(NZStatusBox(config, self), 0, 2, 1, 1)
grid.addWidget(settings.account.nickname, 1, 2, 1, 1)
class NZFooterPane(QWidget):
def __init__(self, parent=None):
super(NZFooterPane, self).__init__(parent)
# TODO
class NZChat(QWidget):
def __init__(self, parent=None):
super(NZChat, self).__init__(parent)
# init inner resource first
# such as socket and settings
self.settings = NZSettings(self)
self.sockets = NZSockets(self.settings)
self.setWindowTitle(self.tr('NZChat'))
self.resize(270, 600)
# move to right top corner
self.move(QApplication.desktop().width() - 400, 0)
self.frame = QFrame(self)
vbox = QVBoxLayout(self.frame)
vbox.setAlignment(Qt.AlignCenter)
self.frame.setLayout(vbox)
self.setCentralWidget(self.frame)
vbox.addWidget(NZHeaderPane(self.config, self), stretch = 3)
self.contentPane = NZContentPane(self.config, self.report, self)
vbox.addWidget(self.contentPane, stretch = 16)
vbox.addWidget(NZFooterPane(self), stretch = 1)
def report(self, err, level):
print 'reporting error'
#TODO
def config(self):
print 'config'
#TODO
def closeEvent(self, event):
pass
self.sendOfflineNotify()
print 'close event handler say....'
def sendOfflineNotify(self):
msg = NZMessage(mtype=NZMsgType.OFFLINE).encode()
self.config.monitorSocket.writeDatagram(msg, QtNetwork.QHostAddress.Broadcast, self.config.monitorPort)
|
stifoon/navitia
|
refs/heads/dev
|
source/jormungandr/jormungandr/protobuf_to_dict.py
|
12
|
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from google.protobuf.descriptor import FieldDescriptor
__all__ = ["protobuf_to_dict", "TYPE_CALLABLE_MAP"]
TYPE_CALLABLE_MAP = {
FieldDescriptor.TYPE_DOUBLE: float,
FieldDescriptor.TYPE_FLOAT: float,
FieldDescriptor.TYPE_INT32: int,
FieldDescriptor.TYPE_INT64: long,
FieldDescriptor.TYPE_UINT32: int,
FieldDescriptor.TYPE_UINT64: long,
FieldDescriptor.TYPE_SINT32: int,
FieldDescriptor.TYPE_SINT64: long,
FieldDescriptor.TYPE_FIXED32: int,
FieldDescriptor.TYPE_FIXED64: long,
FieldDescriptor.TYPE_SFIXED32: int,
FieldDescriptor.TYPE_SFIXED64: long,
FieldDescriptor.TYPE_BOOL: bool,
FieldDescriptor.TYPE_STRING: unicode,
FieldDescriptor.TYPE_BYTES: lambda b: b.encode("base64"),
FieldDescriptor.TYPE_ENUM: int,
}
def repeated(type_callable):
return lambda value_list: [type_callable(value) for value in value_list]
def enum_label_name(field, value):
return field.enum_type.values_by_number[int(value)].name
def protobuf_to_dict(pb, type_callable_map=TYPE_CALLABLE_MAP,
use_enum_labels=False):
# recursion!
type_callable_map[FieldDescriptor.TYPE_MESSAGE] = \
lambda pb: protobuf_to_dict(pb, type_callable_map, use_enum_labels)
result_dict = {}
for field, value in pb.ListFields():
if field.type not in type_callable_map:
raise TypeError("Field %s.%s has unrecognised type id %d" % (
pb.__class__.__name__, field.name, field.type))
type_callable = type_callable_map[field.type]
if use_enum_labels and field.type == FieldDescriptor.TYPE_ENUM:
type_callable = lambda value: enum_label_name(field, value)
if field.label == FieldDescriptor.LABEL_REPEATED:
type_callable = repeated(type_callable)
result_dict[field.name] = type_callable(value)
return result_dict
|
stephane-martin/salt-debian-packaging
|
refs/heads/master
|
salt-2016.3.3/tests/integration/modules/mine.py
|
2
|
# -*- coding: utf-8 -*-
'''
Test the salt mine system
'''
# Import Python libs
from __future__ import absolute_import
import time
# Import Salt Testing libs
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import salt libs
import integration
class MineTest(integration.ModuleCase):
'''
Test the mine system
'''
def test_get(self):
'''
test mine.get and mine.update
'''
self.assertTrue(self.run_function('mine.update', minion_tgt='minion'))
self.assertTrue(
self.run_function(
'mine.update',
minion_tgt='sub_minion'
)
)
self.assertTrue(
self.run_function(
'mine.get',
['minion', 'test.ping']
)
)
def test_send(self):
'''
test mine.send
'''
self.assertFalse(
self.run_function(
'mine.send',
['foo.__spam_and_cheese']
)
)
self.assertTrue(
self.run_function(
'mine.send',
['grains.items'],
minion_tgt='minion',
)
)
self.assertTrue(
self.run_function(
'mine.send',
['grains.items'],
minion_tgt='sub_minion',
)
)
ret = self.run_function(
'mine.get',
['sub_minion', 'grains.items']
)
self.assertEqual(ret['sub_minion']['id'], 'sub_minion')
ret = self.run_function(
'mine.get',
['minion', 'grains.items'],
minion_tgt='sub_minion'
)
self.assertEqual(ret['minion']['id'], 'minion')
def test_mine_flush(self):
'''
Test mine.flush
'''
for minion_id in ('minion', 'sub_minion'):
self.assertTrue(
self.run_function(
'mine.send',
['grains.items'],
minion_tgt=minion_id
)
)
time.sleep(1)
for minion_id in ('minion', 'sub_minion'):
ret = self.run_function(
'mine.get',
[minion_id, 'grains.items'],
minion_tgt=minion_id
)
self.assertEqual(ret[minion_id]['id'], minion_id)
self.assertTrue(
self.run_function(
'mine.flush',
minion_tgt='minion'
)
)
ret_flushed = self.run_function(
'mine.get',
['*', 'grains.items']
)
self.assertEqual(ret_flushed.get('minion', None), None)
self.assertEqual(ret_flushed['sub_minion']['id'], 'sub_minion')
def test_mine_delete(self):
'''
Test mine.delete
'''
self.assertTrue(
self.run_function(
'mine.send',
['grains.items']
)
)
self.assertTrue(
self.run_function(
'mine.send',
['test.echo', 'foo']
)
)
ret_grains = self.run_function(
'mine.get',
['minion', 'grains.items']
)
self.assertEqual(ret_grains['minion']['id'], 'minion')
ret_echo = self.run_function(
'mine.get',
['minion', 'test.echo']
)
self.assertEqual(ret_echo['minion'], 'foo')
self.assertTrue(
self.run_function(
'mine.delete',
['grains.items']
)
)
ret_grains_deleted = self.run_function(
'mine.get',
['minion', 'grains.items']
)
self.assertEqual(ret_grains_deleted.get('minion', None), None)
ret_echo_stays = self.run_function(
'mine.get',
['minion', 'test.echo']
)
self.assertEqual(ret_echo_stays['minion'], 'foo')
if __name__ == '__main__':
from integration import run_tests
run_tests(MineTest)
|
xuanhan863/zulip
|
refs/heads/master
|
zilencer/management/commands/create_deployment.py
|
115
|
from __future__ import absolute_import
from optparse import make_option
import sys
from django.core.management.base import BaseCommand
from zerver.models import get_realm
from zerver.lib.create_user import random_api_key
from zerver.management.commands.create_realm import Command as CreateRealm
from zilencer.models import Deployment
class Command(BaseCommand):
help = """Create a deployment and accompanying realm."""
option_list = CreateRealm.option_list + (
make_option('--no-realm',
dest='no_realm',
action='store_true',
default=False,
help='Do not create a new realm; associate with an existing one.' + \
' In this case, only the domain and URLs need to be specified.'),
make_option('-a', '--api-url',
dest='api',
type='str'),
make_option('-w', '--web-url',
dest='web',
type='str'),
)
def handle(self, *args, **options):
if None in (options["api"], options["web"], options["domain"]):
print >>sys.stderr, "\033[1;31mYou must provide a domain, an API URL, and a web URL.\033[0m\n"
self.print_help("python manage.py", "create_realm")
exit(1)
if not options["no_realm"]:
CreateRealm().handle(*args, **options)
print # Newline
realm = get_realm(options["domain"])
if realm is None:
print >>sys.stderr, "\033[1;31mRealm does not exist!\033[0m\n"
exit(2)
dep = Deployment()
dep.api_key = random_api_key()
dep.save()
old_dep = realm.deployment
if old_dep is not None:
old_dep.realms.remove(realm)
old_dep.save()
dep.realms = [realm]
dep.base_api_url = options["api"]
dep.base_site_url = options["web"]
dep.save()
print "Deployment %s created." % (dep.id,)
print "DEPLOYMENT_ROLE_NAME = %s" % (dep.name,)
print "DEPLOYMENT_ROLE_KEY = %s" % (dep.api_key,)
|
Alignak-monitoring-contrib/alignak-checks-snmp
|
refs/heads/develop
|
version.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2017:
# Frederic Mohier, frederic.mohier@alignak.net
#
"""
Alignak - Checks pack for SNMP monitored Linux hosts/services
"""
# Package name
__pkg_name__ = u"alignak_checks_snmp"
# Checks types for PyPI keywords
# Used for:
# - PyPI keywords
# - directory where to store files in the Alignak configuration (eg. arbiter/packs/checks_type)
__checks_type__ = u"snmp"
# Application manifest
__version__ = u"2.0.3"
__author__ = u"Frédéric MOHIER"
__author_email__ = u"frederic.mohier@alignak.net"
__copyright__ = u"(c) 2015-2017 - %s" % __author__
__license__ = u"GNU Affero General Public License, version 3"
__git_url__ = u"https://github.com/Alignak-monitoring-contrib/alignak-checks-snmp"
__doc_url__ = u"http://alignak-doc.readthedocs.io/en/latest"
__description__ = u"Alignak checks pack for Linux SNMP monitored hosts"
__classifiers__ = [
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',
'Natural Language :: English',
'Programming Language :: Python',
'Topic :: System :: Monitoring',
'Topic :: System :: Systems Administration'
]
|
GameKinger123x/mtasa-blue
|
refs/heads/master
|
vendor/google-breakpad/src/testing/gtest/test/gtest_help_test.py
|
2968
|
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the --help flag of Google C++ Testing Framework.
SYNOPSIS
gtest_help_test.py --build_dir=BUILD/DIR
# where BUILD/DIR contains the built gtest_help_test_ file.
gtest_help_test.py
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import gtest_test_utils
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
IS_WINDOWS = os.name == 'nt'
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_help_test_')
FLAG_PREFIX = '--gtest_'
DEATH_TEST_STYLE_FLAG = FLAG_PREFIX + 'death_test_style'
STREAM_RESULT_TO_FLAG = FLAG_PREFIX + 'stream_result_to'
UNKNOWN_FLAG = FLAG_PREFIX + 'unknown_flag_for_testing'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
INCORRECT_FLAG_VARIANTS = [re.sub('^--', '-', LIST_TESTS_FLAG),
re.sub('^--', '/', LIST_TESTS_FLAG),
re.sub('_', '-', LIST_TESTS_FLAG)]
INTERNAL_FLAG_FOR_TESTING = FLAG_PREFIX + 'internal_flag_for_testing'
SUPPORTS_DEATH_TESTS = "DeathTest" in gtest_test_utils.Subprocess(
[PROGRAM_PATH, LIST_TESTS_FLAG]).output
# The help message must match this regex.
HELP_REGEX = re.compile(
FLAG_PREFIX + r'list_tests.*' +
FLAG_PREFIX + r'filter=.*' +
FLAG_PREFIX + r'also_run_disabled_tests.*' +
FLAG_PREFIX + r'repeat=.*' +
FLAG_PREFIX + r'shuffle.*' +
FLAG_PREFIX + r'random_seed=.*' +
FLAG_PREFIX + r'color=.*' +
FLAG_PREFIX + r'print_time.*' +
FLAG_PREFIX + r'output=.*' +
FLAG_PREFIX + r'break_on_failure.*' +
FLAG_PREFIX + r'throw_on_failure.*' +
FLAG_PREFIX + r'catch_exceptions=0.*',
re.DOTALL)
def RunWithFlag(flag):
"""Runs gtest_help_test_ with the given flag.
Returns:
the exit code and the text output as a tuple.
Args:
flag: the command-line flag to pass to gtest_help_test_, or None.
"""
if flag is None:
command = [PROGRAM_PATH]
else:
command = [PROGRAM_PATH, flag]
child = gtest_test_utils.Subprocess(command)
return child.exit_code, child.output
class GTestHelpTest(gtest_test_utils.TestCase):
"""Tests the --help flag and its equivalent forms."""
def TestHelpFlag(self, flag):
"""Verifies correct behavior when help flag is specified.
The right message must be printed and the tests must
be skipped when the given flag is specified.
Args:
flag: A flag to pass to the binary or None.
"""
exit_code, output = RunWithFlag(flag)
self.assertEquals(0, exit_code)
self.assert_(HELP_REGEX.search(output), output)
if IS_LINUX:
self.assert_(STREAM_RESULT_TO_FLAG in output, output)
else:
self.assert_(STREAM_RESULT_TO_FLAG not in output, output)
if SUPPORTS_DEATH_TESTS and not IS_WINDOWS:
self.assert_(DEATH_TEST_STYLE_FLAG in output, output)
else:
self.assert_(DEATH_TEST_STYLE_FLAG not in output, output)
def TestNonHelpFlag(self, flag):
"""Verifies correct behavior when no help flag is specified.
Verifies that when no help flag is specified, the tests are run
and the help message is not printed.
Args:
flag: A flag to pass to the binary or None.
"""
exit_code, output = RunWithFlag(flag)
self.assert_(exit_code != 0)
self.assert_(not HELP_REGEX.search(output), output)
def testPrintsHelpWithFullFlag(self):
self.TestHelpFlag('--help')
def testPrintsHelpWithShortFlag(self):
self.TestHelpFlag('-h')
def testPrintsHelpWithQuestionFlag(self):
self.TestHelpFlag('-?')
def testPrintsHelpWithWindowsStyleQuestionFlag(self):
self.TestHelpFlag('/?')
def testPrintsHelpWithUnrecognizedGoogleTestFlag(self):
self.TestHelpFlag(UNKNOWN_FLAG)
def testPrintsHelpWithIncorrectFlagStyle(self):
for incorrect_flag in INCORRECT_FLAG_VARIANTS:
self.TestHelpFlag(incorrect_flag)
def testRunsTestsWithoutHelpFlag(self):
"""Verifies that when no help flag is specified, the tests are run
and the help message is not printed."""
self.TestNonHelpFlag(None)
def testRunsTestsWithGtestInternalFlag(self):
"""Verifies that the tests are run and no help message is printed when
a flag starting with Google Test prefix and 'internal_' is supplied."""
self.TestNonHelpFlag(INTERNAL_FLAG_FOR_TESTING)
if __name__ == '__main__':
gtest_test_utils.Main()
|
hnakamur/django
|
refs/heads/master
|
tests/migrations/test_migrations_squashed/0001_initial.py
|
2995
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
operations = [
migrations.CreateModel(
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=255)),
("slug", models.SlugField(null=True)),
("age", models.IntegerField(default=0)),
("silly_field", models.BooleanField(default=False)),
],
),
migrations.CreateModel(
"Tribble",
[
("id", models.AutoField(primary_key=True)),
("fluffy", models.BooleanField(default=True)),
],
)
]
|
egaxegax/django-dbcartajs
|
refs/heads/master
|
django/contrib/gis/maps/google/gmap.py
|
174
|
from django.conf import settings
from django.template.loader import render_to_string
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.six.moves import xrange
from django.contrib.gis.maps.google.overlays import GPolygon, GPolyline, GMarker
class GoogleMapException(Exception):
pass
# The default Google Maps URL (for the API javascript)
# TODO: Internationalize for Japan, UK, etc.
GOOGLE_MAPS_URL='http://maps.google.com/maps?file=api&v=%s&key='
class GoogleMap(object):
"A class for generating Google Maps JavaScript."
# String constants
onunload = mark_safe('onunload="GUnload()"') # Cleans up after Google Maps
vml_css = mark_safe('v\:* {behavior:url(#default#VML);}') # CSS for IE VML
xmlns = mark_safe('xmlns:v="urn:schemas-microsoft-com:vml"') # XML Namespace (for IE VML).
def __init__(self, key=None, api_url=None, version=None,
center=None, zoom=None, dom_id='map',
kml_urls=[], polylines=None, polygons=None, markers=None,
template='gis/google/google-map.js',
js_module='geodjango',
extra_context={}):
# The Google Maps API Key defined in the settings will be used
# if not passed in as a parameter. The use of an API key is
# _required_.
if not key:
try:
self.key = settings.GOOGLE_MAPS_API_KEY
except AttributeError:
raise GoogleMapException('Google Maps API Key not found (try adding GOOGLE_MAPS_API_KEY to your settings).')
else:
self.key = key
# Getting the Google Maps API version, defaults to using the latest ("2.x"),
# this is not necessarily the most stable.
if not version:
self.version = getattr(settings, 'GOOGLE_MAPS_API_VERSION', '2.x')
else:
self.version = version
# Can specify the API URL in the `api_url` keyword.
if not api_url:
self.api_url = getattr(settings, 'GOOGLE_MAPS_URL', GOOGLE_MAPS_URL) % self.version
else:
self.api_url = api_url
# Setting the DOM id of the map, the load function, the JavaScript
# template, and the KML URLs array.
self.dom_id = dom_id
self.extra_context = extra_context
self.js_module = js_module
self.template = template
self.kml_urls = kml_urls
# Does the user want any GMarker, GPolygon, and/or GPolyline overlays?
overlay_info = [[GMarker, markers, 'markers'],
[GPolygon, polygons, 'polygons'],
[GPolyline, polylines, 'polylines']]
for overlay_class, overlay_list, varname in overlay_info:
setattr(self, varname, [])
if overlay_list:
for overlay in overlay_list:
if isinstance(overlay, overlay_class):
getattr(self, varname).append(overlay)
else:
getattr(self, varname).append(overlay_class(overlay))
# If GMarker, GPolygons, and/or GPolylines are used the zoom will be
# automatically calculated via the Google Maps API. If both a zoom
# level and a center coordinate are provided with polygons/polylines,
# no automatic determination will occur.
self.calc_zoom = False
if self.polygons or self.polylines or self.markers:
if center is None or zoom is None:
self.calc_zoom = True
# Defaults for the zoom level and center coordinates if the zoom
# is not automatically calculated.
if zoom is None: zoom = 4
self.zoom = zoom
if center is None: center = (0, 0)
self.center = center
def render(self):
"""
Generates the JavaScript necessary for displaying this Google Map.
"""
params = {'calc_zoom' : self.calc_zoom,
'center' : self.center,
'dom_id' : self.dom_id,
'js_module' : self.js_module,
'kml_urls' : self.kml_urls,
'zoom' : self.zoom,
'polygons' : self.polygons,
'polylines' : self.polylines,
'icons': self.icons,
'markers' : self.markers,
}
params.update(self.extra_context)
return render_to_string(self.template, params)
@property
def body(self):
"Returns HTML body tag for loading and unloading Google Maps javascript."
return format_html('<body {0} {1}>', self.onload, self.onunload)
@property
def onload(self):
"Returns the `onload` HTML <body> attribute."
return format_html('onload="{0}.{1}_load()"', self.js_module, self.dom_id)
@property
def api_script(self):
"Returns the <script> tag for the Google Maps API javascript."
return format_html('<script src="{0}{1}" type="text/javascript"></script>',
self.api_url, self.key)
@property
def js(self):
"Returns only the generated Google Maps JavaScript (no <script> tags)."
return self.render()
@property
def scripts(self):
"Returns all <script></script> tags required with Google Maps JavaScript."
return format_html('{0}\n <script type="text/javascript">\n//<![CDATA[\n{1}//]]>\n </script>',
self.api_script, mark_safe(self.js))
@property
def style(self):
"Returns additional CSS styling needed for Google Maps on IE."
return format_html('<style type="text/css">{0}</style>', self.vml_css)
@property
def xhtml(self):
"Returns XHTML information needed for IE VML overlays."
return format_html('<html xmlns="http://www.w3.org/1999/xhtml" {0}>', self.xmlns)
@property
def icons(self):
"Returns a sequence of GIcon objects in this map."
return set([marker.icon for marker in self.markers if marker.icon])
class GoogleMapSet(GoogleMap):
def __init__(self, *args, **kwargs):
"""
A class for generating sets of Google Maps that will be shown on the
same page together.
Example:
gmapset = GoogleMapSet( GoogleMap( ... ), GoogleMap( ... ) )
gmapset = GoogleMapSet( [ gmap1, gmap2] )
"""
# The `google-multi.js` template is used instead of `google-single.js`
# by default.
template = kwargs.pop('template', 'gis/google/google-multi.js')
# This is the template used to generate the GMap load JavaScript for
# each map in the set.
self.map_template = kwargs.pop('map_template', 'gis/google/google-single.js')
# Running GoogleMap.__init__(), and resetting the template
# value with default obtained above.
super(GoogleMapSet, self).__init__(**kwargs)
self.template = template
# If a tuple/list is passed in as the first element of args, assume it holds the maps.
if isinstance(args[0], (tuple, list)):
self.maps = args[0]
else:
self.maps = args
# Generating DOM ids for each of the maps in the set.
self.dom_ids = ['map%d' % i for i in xrange(len(self.maps))]
def load_map_js(self):
"""
Returns JavaScript containing all of the loading routines for each
map in this set.
"""
result = []
for dom_id, gmap in zip(self.dom_ids, self.maps):
# Backup copies the GoogleMap DOM id and template attributes.
# They are overridden on each GoogleMap instance in the set so
# that only the loading JavaScript (and not the header variables)
# is used with the generated DOM ids.
tmp = (gmap.template, gmap.dom_id)
gmap.template = self.map_template
gmap.dom_id = dom_id
result.append(gmap.js)
# Restoring the backup values.
gmap.template, gmap.dom_id = tmp
return mark_safe(''.join(result))
def render(self):
"""
Generates the JavaScript for the collection of Google Maps in
this set.
"""
params = {'js_module' : self.js_module,
'dom_ids' : self.dom_ids,
'load_map_js' : self.load_map_js(),
'icons' : self.icons,
}
params.update(self.extra_context)
return render_to_string(self.template, params)
@property
def onload(self):
"Returns the `onload` HTML <body> attribute."
# Overloaded to use the `load` function defined in the
# `google-multi.js`, which calls the load routines for
# each one of the individual maps in the set.
return mark_safe('onload="%s.load()"' % self.js_module)
@property
def icons(self):
"Returns a sequence of all icons in each map of the set."
icons = set()
for map in self.maps: icons |= map.icons
return icons
|
romankagan/DDBWorkbench
|
refs/heads/master
|
python/testData/intentions/afterConvertStaticMethodToFunction.py
|
83
|
class MyClass(object):
"""
My class to show intention.
"""
def __init__(self):
self.a = 1
def my_static_method():
import code
import time
time.sleep(100)
print code
|
jprine/monitoring-module
|
refs/heads/master
|
docs/source/conf.py
|
1
|
# -*- coding: utf-8 -*-
import os
from subprocess import check_output
import sys
git_args = ['git', 'describe', '--tags', '--always']
git_tag = check_output(git_args, universal_newlines=True)
sys.path.insert(0, os.path.abspath('../../src'))
needs_sphinx = '1.3'
extensions = [
'sphinx.ext.autodoc',
]
autodoc_mock_imports = [
'hec',
'hec.dssgui',
'hec.heclib', 'hec.heclib.util',
'hec.io',
'hec.script'
]
autodoc_member_order = 'bysource'
autoclass_content = 'both'
source_suffix = '.rst'
master_doc = 'index'
project = 'Monitoring Module for HEC-DSSVue'
copyright = '2015, EnviroCentre. All rights reserved.'
version = '.'.join(git_tag.strip('v').split(".")[0:2])
release = git_tag.strip('v')
pygments_style = 'sphinx'
# -- Options for HTML output ----------------------------------------------
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_static_path = ['_static']
html_last_updated_fmt = '%d/%m/%Y'
html_show_sourcelink = False
html_copy_source = False
html_show_sphinx = False
htmlhelp_basename = 'doc'
|
devcline/mtasa-blue
|
refs/heads/master
|
vendor/google-breakpad/src/tools/gyp/test/win/gyptest-link-uldi.py
|
344
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure that when ULDI is on, we link .objs that make up .libs rather than
the .libs themselves.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'uldi'
test.run_gyp('uldi.gyp', chdir=CHDIR)
# When linking with ULDI, the duplicated function from the lib will be an
# error.
test.build('uldi.gyp', 'final_uldi', chdir=CHDIR, status=1)
# And when in libs, the duplicated function will be silently dropped, so the
# build succeeds.
test.build('uldi.gyp', 'final_no_uldi', chdir=CHDIR)
test.pass_test()
|
AngelLMbot/The-Cocky-Website
|
refs/heads/master
|
functions/twitter.py
|
2
|
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
import time
import math
import bs4 as bs
import urllib2
import re
def getStats(TWuser):
TWurl = 'https://twitter.com/' + TWuser
TWlikes = 0
TWretweets = 0
TWcomments = 0
profile = webdriver.FirefoxProfile()
profile.set_preference('permissions.default.image', 2)
driver = webdriver.Firefox(profile)
driver.get(TWurl)
TWname = driver.find_element_by_class_name('ProfileHeaderCard-nameLink').text
TWfollowers = int (driver.find_elements_by_class_name('ProfileNav-value')[2].get_attribute('data-count'))
while driver.find_element_by_class_name('back-to-top').is_displayed()==False:
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
tweets = driver.find_elements_by_class_name('tweet')
validTweets = []
for t in tweets:
if t.get_attribute('data-name')==TWname:
validTweets.append(t)
for vt in validTweets:
counter = vt.find_elements_by_class_name('ProfileTweet-actionCountForPresentation')
if counter[0].text!='':
TWcomments += int(counter[0].text)
if counter[1].text!='':
TWretweets += int(counter[1].text)
if counter[3].text!='':
TWlikes += int(counter[3].text)
driver.close()
TWarray = [TWfollowers, TWlikes, TWretweets, TWcomments]
return TWarray
|
ewitz/PhotoHaus
|
refs/heads/master
|
venv/lib/python2.7/site-packages/werkzeug/test.py
|
146
|
# -*- coding: utf-8 -*-
"""
werkzeug.test
~~~~~~~~~~~~~
This module implements a client to WSGI applications for testing.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sys
import mimetypes
from time import time
from random import random
from itertools import chain
from tempfile import TemporaryFile
from io import BytesIO
try:
from urllib2 import Request as U2Request
except ImportError:
from urllib.request import Request as U2Request
try:
from http.cookiejar import CookieJar
except ImportError: # Py2
from cookielib import CookieJar
from werkzeug._compat import iterlists, iteritems, itervalues, to_bytes, \
string_types, text_type, reraise, wsgi_encoding_dance, \
make_literal_wrapper
from werkzeug._internal import _empty_stream, _get_environ
from werkzeug.wrappers import BaseRequest
from werkzeug.urls import url_encode, url_fix, iri_to_uri, url_unquote, \
url_unparse, url_parse
from werkzeug.wsgi import get_host, get_current_url, ClosingIterator
from werkzeug.utils import dump_cookie
from werkzeug.datastructures import FileMultiDict, MultiDict, \
CombinedMultiDict, Headers, FileStorage
def stream_encode_multipart(values, use_tempfile=True, threshold=1024 * 500,
boundary=None, charset='utf-8'):
"""Encode a dict of values (either strings or file descriptors or
:class:`FileStorage` objects.) into a multipart encoded string stored
in a file descriptor.
"""
if boundary is None:
boundary = '---------------WerkzeugFormPart_%s%s' % (time(), random())
_closure = [BytesIO(), 0, False]
if use_tempfile:
def write_binary(string):
stream, total_length, on_disk = _closure
if on_disk:
stream.write(string)
else:
length = len(string)
if length + _closure[1] <= threshold:
stream.write(string)
else:
new_stream = TemporaryFile('wb+')
new_stream.write(stream.getvalue())
new_stream.write(string)
_closure[0] = new_stream
_closure[2] = True
_closure[1] = total_length + length
else:
write_binary = _closure[0].write
def write(string):
write_binary(string.encode(charset))
if not isinstance(values, MultiDict):
values = MultiDict(values)
for key, values in iterlists(values):
for value in values:
write('--%s\r\nContent-Disposition: form-data; name="%s"' %
(boundary, key))
reader = getattr(value, 'read', None)
if reader is not None:
filename = getattr(value, 'filename',
getattr(value, 'name', None))
content_type = getattr(value, 'content_type', None)
if content_type is None:
content_type = filename and \
mimetypes.guess_type(filename)[0] or \
'application/octet-stream'
if filename is not None:
write('; filename="%s"\r\n' % filename)
else:
write('\r\n')
write('Content-Type: %s\r\n\r\n' % content_type)
while 1:
chunk = reader(16384)
if not chunk:
break
write_binary(chunk)
else:
if not isinstance(value, string_types):
value = str(value)
else:
value = to_bytes(value, charset)
write('\r\n\r\n')
write_binary(value)
write('\r\n')
write('--%s--\r\n' % boundary)
length = int(_closure[0].tell())
_closure[0].seek(0)
return _closure[0], length, boundary
def encode_multipart(values, boundary=None, charset='utf-8'):
"""Like `stream_encode_multipart` but returns a tuple in the form
(``boundary``, ``data``) where data is a bytestring.
"""
stream, length, boundary = stream_encode_multipart(
values, use_tempfile=False, boundary=boundary, charset=charset)
return boundary, stream.read()
def File(fd, filename=None, mimetype=None):
"""Backwards compat."""
from warnings import warn
warn(DeprecationWarning('werkzeug.test.File is deprecated, use the '
'EnvironBuilder or FileStorage instead'))
return FileStorage(fd, filename=filename, content_type=mimetype)
class _TestCookieHeaders(object):
"""A headers adapter for cookielib
"""
def __init__(self, headers):
self.headers = headers
def getheaders(self, name):
headers = []
name = name.lower()
for k, v in self.headers:
if k.lower() == name:
headers.append(v)
return headers
def get_all(self, name, default=None):
rv = []
for k, v in self.headers:
if k.lower() == name.lower():
rv.append(v)
return rv or default or []
class _TestCookieResponse(object):
"""Something that looks like a httplib.HTTPResponse, but is actually just an
adapter for our test responses to make them available for cookielib.
"""
def __init__(self, headers):
self.headers = _TestCookieHeaders(headers)
def info(self):
return self.headers
class _TestCookieJar(CookieJar):
"""A cookielib.CookieJar modified to inject and read cookie headers from
and to wsgi environments, and wsgi application responses.
"""
def inject_wsgi(self, environ):
"""Inject the cookies as client headers into the server's wsgi
environment.
"""
cvals = []
for cookie in self:
cvals.append('%s=%s' % (cookie.name, cookie.value))
if cvals:
environ['HTTP_COOKIE'] = '; '.join(cvals)
def extract_wsgi(self, environ, headers):
"""Extract the server's set-cookie headers as cookies into the
cookie jar.
"""
self.extract_cookies(
_TestCookieResponse(headers),
U2Request(get_current_url(environ)),
)
def _iter_data(data):
"""Iterates over a dict or multidict yielding all keys and values.
This is used to iterate over the data passed to the
:class:`EnvironBuilder`.
"""
if isinstance(data, MultiDict):
for key, values in iterlists(data):
for value in values:
yield key, value
else:
for key, values in iteritems(data):
if isinstance(values, list):
for value in values:
yield key, value
else:
yield key, values
class EnvironBuilder(object):
"""This class can be used to conveniently create a WSGI environment
for testing purposes. It can be used to quickly create WSGI environments
or request objects from arbitrary data.
The signature of this class is also used in some other places as of
Werkzeug 0.5 (:func:`create_environ`, :meth:`BaseResponse.from_values`,
:meth:`Client.open`). Because of this most of the functionality is
available through the constructor alone.
Files and regular form data can be manipulated independently of each
other with the :attr:`form` and :attr:`files` attributes, but are
passed with the same argument to the constructor: `data`.
`data` can be any of these values:
- a `str`: If it's a string it is converted into a :attr:`input_stream`,
the :attr:`content_length` is set and you have to provide a
:attr:`content_type`.
- a `dict`: If it's a dict the keys have to be strings and the values
any of the following objects:
- a :class:`file`-like object. These are converted into
:class:`FileStorage` objects automatically.
- a tuple. The :meth:`~FileMultiDict.add_file` method is called
with the tuple items as positional arguments.
.. versionadded:: 0.6
`path` and `base_url` can now be unicode strings that are encoded using
the :func:`iri_to_uri` function.
:param path: the path of the request. In the WSGI environment this will
end up as `PATH_INFO`. If the `query_string` is not defined
and there is a question mark in the `path` everything after
it is used as query string.
:param base_url: the base URL is a URL that is used to extract the WSGI
URL scheme, host (server name + server port) and the
script root (`SCRIPT_NAME`).
:param query_string: an optional string or dict with URL parameters.
:param method: the HTTP method to use, defaults to `GET`.
:param input_stream: an optional input stream. Do not specify this and
`data`. As soon as an input stream is set you can't
modify :attr:`args` and :attr:`files` unless you
set the :attr:`input_stream` to `None` again.
:param content_type: The content type for the request. As of 0.5 you
don't have to provide this when specifying files
and form data via `data`.
:param content_length: The content length for the request. You don't
have to specify this when providing data via
`data`.
:param errors_stream: an optional error stream that is used for
`wsgi.errors`. Defaults to :data:`stderr`.
:param multithread: controls `wsgi.multithread`. Defaults to `False`.
:param multiprocess: controls `wsgi.multiprocess`. Defaults to `False`.
:param run_once: controls `wsgi.run_once`. Defaults to `False`.
:param headers: an optional list or :class:`Headers` object of headers.
:param data: a string or dict of form data. See explanation above.
:param environ_base: an optional dict of environment defaults.
:param environ_overrides: an optional dict of environment overrides.
:param charset: the charset used to encode unicode data.
"""
#: the server protocol to use. defaults to HTTP/1.1
server_protocol = 'HTTP/1.1'
#: the wsgi version to use. defaults to (1, 0)
wsgi_version = (1, 0)
#: the default request class for :meth:`get_request`
request_class = BaseRequest
def __init__(self, path='/', base_url=None, query_string=None,
method='GET', input_stream=None, content_type=None,
content_length=None, errors_stream=None, multithread=False,
multiprocess=False, run_once=False, headers=None, data=None,
environ_base=None, environ_overrides=None, charset='utf-8'):
path_s = make_literal_wrapper(path)
if query_string is None and path_s('?') in path:
path, query_string = path.split(path_s('?'), 1)
self.charset = charset
self.path = iri_to_uri(path)
if base_url is not None:
base_url = url_fix(iri_to_uri(base_url, charset), charset)
self.base_url = base_url
if isinstance(query_string, (bytes, text_type)):
self.query_string = query_string
else:
if query_string is None:
query_string = MultiDict()
elif not isinstance(query_string, MultiDict):
query_string = MultiDict(query_string)
self.args = query_string
self.method = method
if headers is None:
headers = Headers()
elif not isinstance(headers, Headers):
headers = Headers(headers)
self.headers = headers
if content_type is not None:
self.content_type = content_type
if errors_stream is None:
errors_stream = sys.stderr
self.errors_stream = errors_stream
self.multithread = multithread
self.multiprocess = multiprocess
self.run_once = run_once
self.environ_base = environ_base
self.environ_overrides = environ_overrides
self.input_stream = input_stream
self.content_length = content_length
self.closed = False
if data:
if input_stream is not None:
raise TypeError('can\'t provide input stream and data')
if isinstance(data, text_type):
data = data.encode(self.charset)
if isinstance(data, bytes):
self.input_stream = BytesIO(data)
if self.content_length is None:
self.content_length = len(data)
else:
for key, value in _iter_data(data):
if isinstance(value, (tuple, dict)) or \
hasattr(value, 'read'):
self._add_file_from_data(key, value)
else:
self.form.setlistdefault(key).append(value)
def _add_file_from_data(self, key, value):
"""Called in the EnvironBuilder to add files from the data dict."""
if isinstance(value, tuple):
self.files.add_file(key, *value)
elif isinstance(value, dict):
from warnings import warn
warn(DeprecationWarning('it\'s no longer possible to pass dicts '
'as `data`. Use tuples or FileStorage '
'objects instead'), stacklevel=2)
value = dict(value)
mimetype = value.pop('mimetype', None)
if mimetype is not None:
value['content_type'] = mimetype
self.files.add_file(key, **value)
else:
self.files.add_file(key, value)
def _get_base_url(self):
return url_unparse((self.url_scheme, self.host,
self.script_root, '', '')).rstrip('/') + '/'
def _set_base_url(self, value):
if value is None:
scheme = 'http'
netloc = 'localhost'
script_root = ''
else:
scheme, netloc, script_root, qs, anchor = url_parse(value)
if qs or anchor:
raise ValueError('base url must not contain a query string '
'or fragment')
self.script_root = script_root.rstrip('/')
self.host = netloc
self.url_scheme = scheme
base_url = property(_get_base_url, _set_base_url, doc='''
The base URL is a URL that is used to extract the WSGI
URL scheme, host (server name + server port) and the
script root (`SCRIPT_NAME`).''')
del _get_base_url, _set_base_url
def _get_content_type(self):
ct = self.headers.get('Content-Type')
if ct is None and not self._input_stream:
if self.method in ('POST', 'PUT', 'PATCH'):
if self._files:
return 'multipart/form-data'
return 'application/x-www-form-urlencoded'
return None
return ct
def _set_content_type(self, value):
if value is None:
self.headers.pop('Content-Type', None)
else:
self.headers['Content-Type'] = value
content_type = property(_get_content_type, _set_content_type, doc='''
The content type for the request. Reflected from and to the
:attr:`headers`. Do not set if you set :attr:`files` or
:attr:`form` for auto detection.''')
del _get_content_type, _set_content_type
def _get_content_length(self):
return self.headers.get('Content-Length', type=int)
def _set_content_length(self, value):
if value is None:
self.headers.pop('Content-Length', None)
else:
self.headers['Content-Length'] = str(value)
content_length = property(_get_content_length, _set_content_length, doc='''
The content length as integer. Reflected from and to the
:attr:`headers`. Do not set if you set :attr:`files` or
:attr:`form` for auto detection.''')
del _get_content_length, _set_content_length
def form_property(name, storage, doc):
key = '_' + name
def getter(self):
if self._input_stream is not None:
raise AttributeError('an input stream is defined')
rv = getattr(self, key)
if rv is None:
rv = storage()
setattr(self, key, rv)
return rv
def setter(self, value):
self._input_stream = None
setattr(self, key, value)
return property(getter, setter, doc)
form = form_property('form', MultiDict, doc='''
A :class:`MultiDict` of form values.''')
files = form_property('files', FileMultiDict, doc='''
A :class:`FileMultiDict` of uploaded files. You can use the
:meth:`~FileMultiDict.add_file` method to add new files to the
dict.''')
del form_property
def _get_input_stream(self):
return self._input_stream
def _set_input_stream(self, value):
self._input_stream = value
self._form = self._files = None
input_stream = property(_get_input_stream, _set_input_stream, doc='''
An optional input stream. If you set this it will clear
:attr:`form` and :attr:`files`.''')
del _get_input_stream, _set_input_stream
def _get_query_string(self):
if self._query_string is None:
if self._args is not None:
return url_encode(self._args, charset=self.charset)
return ''
return self._query_string
def _set_query_string(self, value):
self._query_string = value
self._args = None
query_string = property(_get_query_string, _set_query_string, doc='''
The query string. If you set this to a string :attr:`args` will
no longer be available.''')
del _get_query_string, _set_query_string
def _get_args(self):
if self._query_string is not None:
raise AttributeError('a query string is defined')
if self._args is None:
self._args = MultiDict()
return self._args
def _set_args(self, value):
self._query_string = None
self._args = value
args = property(_get_args, _set_args, doc='''
The URL arguments as :class:`MultiDict`.''')
del _get_args, _set_args
@property
def server_name(self):
"""The server name (read-only, use :attr:`host` to set)"""
return self.host.split(':', 1)[0]
@property
def server_port(self):
"""The server port as integer (read-only, use :attr:`host` to set)"""
pieces = self.host.split(':', 1)
if len(pieces) == 2 and pieces[1].isdigit():
return int(pieces[1])
elif self.url_scheme == 'https':
return 443
return 80
def __del__(self):
try:
self.close()
except Exception:
pass
def close(self):
"""Closes all files. If you put real :class:`file` objects into the
:attr:`files` dict you can call this method to automatically close
them all in one go.
"""
if self.closed:
return
try:
files = itervalues(self.files)
except AttributeError:
files = ()
for f in files:
try:
f.close()
except Exception:
pass
self.closed = True
def get_environ(self):
"""Return the built environ."""
input_stream = self.input_stream
content_length = self.content_length
content_type = self.content_type
if input_stream is not None:
start_pos = input_stream.tell()
input_stream.seek(0, 2)
end_pos = input_stream.tell()
input_stream.seek(start_pos)
content_length = end_pos - start_pos
elif content_type == 'multipart/form-data':
values = CombinedMultiDict([self.form, self.files])
input_stream, content_length, boundary = \
stream_encode_multipart(values, charset=self.charset)
content_type += '; boundary="%s"' % boundary
elif content_type == 'application/x-www-form-urlencoded':
#py2v3 review
values = url_encode(self.form, charset=self.charset)
values = values.encode('ascii')
content_length = len(values)
input_stream = BytesIO(values)
else:
input_stream = _empty_stream
result = {}
if self.environ_base:
result.update(self.environ_base)
def _path_encode(x):
return wsgi_encoding_dance(url_unquote(x, self.charset), self.charset)
qs = wsgi_encoding_dance(self.query_string)
result.update({
'REQUEST_METHOD': self.method,
'SCRIPT_NAME': _path_encode(self.script_root),
'PATH_INFO': _path_encode(self.path),
'QUERY_STRING': qs,
'SERVER_NAME': self.server_name,
'SERVER_PORT': str(self.server_port),
'HTTP_HOST': self.host,
'SERVER_PROTOCOL': self.server_protocol,
'CONTENT_TYPE': content_type or '',
'CONTENT_LENGTH': str(content_length or '0'),
'wsgi.version': self.wsgi_version,
'wsgi.url_scheme': self.url_scheme,
'wsgi.input': input_stream,
'wsgi.errors': self.errors_stream,
'wsgi.multithread': self.multithread,
'wsgi.multiprocess': self.multiprocess,
'wsgi.run_once': self.run_once
})
for key, value in self.headers.to_wsgi_list():
result['HTTP_%s' % key.upper().replace('-', '_')] = value
if self.environ_overrides:
result.update(self.environ_overrides)
return result
def get_request(self, cls=None):
"""Returns a request with the data. If the request class is not
specified :attr:`request_class` is used.
:param cls: The request wrapper to use.
"""
if cls is None:
cls = self.request_class
return cls(self.get_environ())
class ClientRedirectError(Exception):
"""
If a redirect loop is detected when using follow_redirects=True with
    the :class:`Client`, then this exception is raised.
"""
class Client(object):
"""This class allows to send requests to a wrapped application.
The response wrapper can be a class or factory function that takes
three arguments: app_iter, status and headers. The default response
wrapper just returns a tuple.
Example::
class ClientResponse(BaseResponse):
...
client = Client(MyApplication(), response_wrapper=ClientResponse)
The use_cookies parameter indicates whether cookies should be stored and
sent for subsequent requests. This is True by default, but passing False
will disable this behaviour.
    If you want to request some subdomain of your application you may set
    `allow_subdomain_redirects` to `True`; otherwise no external redirects
    are allowed.
.. versionadded:: 0.5
`use_cookies` is new in this version. Older versions did not provide
builtin cookie support.
"""
def __init__(self, application, response_wrapper=None, use_cookies=True,
allow_subdomain_redirects=False):
self.application = application
self.response_wrapper = response_wrapper
if use_cookies:
self.cookie_jar = _TestCookieJar()
else:
self.cookie_jar = None
self.allow_subdomain_redirects = allow_subdomain_redirects
def set_cookie(self, server_name, key, value='', max_age=None,
expires=None, path='/', domain=None, secure=None,
httponly=False, charset='utf-8'):
"""Sets a cookie in the client's cookie jar. The server name
is required and has to match the one that is also passed to
the open call.
"""
assert self.cookie_jar is not None, 'cookies disabled'
header = dump_cookie(key, value, max_age, expires, path, domain,
secure, httponly, charset)
environ = create_environ(path, base_url='http://' + server_name)
headers = [('Set-Cookie', header)]
self.cookie_jar.extract_wsgi(environ, headers)
def delete_cookie(self, server_name, key, path='/', domain=None):
"""Deletes a cookie in the test client."""
self.set_cookie(server_name, key, expires=0, max_age=0,
path=path, domain=domain)
def run_wsgi_app(self, environ, buffered=False):
"""Runs the wrapped WSGI app with the given environment."""
if self.cookie_jar is not None:
self.cookie_jar.inject_wsgi(environ)
rv = run_wsgi_app(self.application, environ, buffered=buffered)
if self.cookie_jar is not None:
self.cookie_jar.extract_wsgi(environ, rv[2])
return rv
def resolve_redirect(self, response, new_location, environ, buffered=False):
"""Resolves a single redirect and triggers the request again
directly on this redirect client.
"""
scheme, netloc, script_root, qs, anchor = url_parse(new_location)
base_url = url_unparse((scheme, netloc, '', '', '')).rstrip('/') + '/'
cur_server_name = netloc.split(':', 1)[0].split('.')
real_server_name = get_host(environ).rsplit(':', 1)[0].split('.')
if self.allow_subdomain_redirects:
allowed = cur_server_name[-len(real_server_name):] == real_server_name
else:
allowed = cur_server_name == real_server_name
if not allowed:
raise RuntimeError('%r does not support redirect to '
'external targets' % self.__class__)
# For redirect handling we temporarily disable the response
# wrapper. This is not threadsafe but not a real concern
# since the test client must not be shared anyways.
old_response_wrapper = self.response_wrapper
self.response_wrapper = None
try:
return self.open(path=script_root, base_url=base_url,
query_string=qs, as_tuple=True,
buffered=buffered)
finally:
self.response_wrapper = old_response_wrapper
def open(self, *args, **kwargs):
"""Takes the same arguments as the :class:`EnvironBuilder` class with
some additions: You can provide a :class:`EnvironBuilder` or a WSGI
environment as only argument instead of the :class:`EnvironBuilder`
arguments and two optional keyword arguments (`as_tuple`, `buffered`)
that change the type of the return value or the way the application is
executed.
.. versionchanged:: 0.5
If a dict is provided as file in the dict for the `data` parameter
the content type has to be called `content_type` now instead of
`mimetype`. This change was made for consistency with
:class:`werkzeug.FileWrapper`.
The `follow_redirects` parameter was added to :func:`open`.
Additional parameters:
:param as_tuple: Returns a tuple in the form ``(environ, result)``
:param buffered: Set this to True to buffer the application run.
This will automatically close the application for
you as well.
:param follow_redirects: Set this to True if the `Client` should
follow HTTP redirects.
"""
as_tuple = kwargs.pop('as_tuple', False)
buffered = kwargs.pop('buffered', False)
follow_redirects = kwargs.pop('follow_redirects', False)
environ = None
if not kwargs and len(args) == 1:
if isinstance(args[0], EnvironBuilder):
environ = args[0].get_environ()
elif isinstance(args[0], dict):
environ = args[0]
if environ is None:
builder = EnvironBuilder(*args, **kwargs)
try:
environ = builder.get_environ()
finally:
builder.close()
response = self.run_wsgi_app(environ, buffered=buffered)
# handle redirects
redirect_chain = []
while 1:
status_code = int(response[1].split(None, 1)[0])
if status_code not in (301, 302, 303, 305, 307) \
or not follow_redirects:
break
new_location = response[2]['location']
new_redirect_entry = (new_location, status_code)
if new_redirect_entry in redirect_chain:
raise ClientRedirectError('loop detected')
redirect_chain.append(new_redirect_entry)
environ, response = self.resolve_redirect(response, new_location,
environ, buffered=buffered)
if self.response_wrapper is not None:
response = self.response_wrapper(*response)
if as_tuple:
return environ, response
return response
def get(self, *args, **kw):
"""Like open but method is enforced to GET."""
kw['method'] = 'GET'
return self.open(*args, **kw)
def patch(self, *args, **kw):
"""Like open but method is enforced to PATCH."""
kw['method'] = 'PATCH'
return self.open(*args, **kw)
def post(self, *args, **kw):
"""Like open but method is enforced to POST."""
kw['method'] = 'POST'
return self.open(*args, **kw)
def head(self, *args, **kw):
"""Like open but method is enforced to HEAD."""
kw['method'] = 'HEAD'
return self.open(*args, **kw)
def put(self, *args, **kw):
"""Like open but method is enforced to PUT."""
kw['method'] = 'PUT'
return self.open(*args, **kw)
def delete(self, *args, **kw):
"""Like open but method is enforced to DELETE."""
kw['method'] = 'DELETE'
return self.open(*args, **kw)
def options(self, *args, **kw):
"""Like open but method is enforced to OPTIONS."""
kw['method'] = 'OPTIONS'
return self.open(*args, **kw)
def trace(self, *args, **kw):
"""Like open but method is enforced to TRACE."""
kw['method'] = 'TRACE'
return self.open(*args, **kw)
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.application
)
def create_environ(*args, **kwargs):
"""Create a new WSGI environ dict based on the values passed. The first
parameter should be the path of the request which defaults to '/'. The
second one can either be an absolute path (in that case the host is
    localhost:80) or a full path to the request with scheme, netloc, port and
the path to the script.
This accepts the same arguments as the :class:`EnvironBuilder`
constructor.
.. versionchanged:: 0.5
This function is now a thin wrapper over :class:`EnvironBuilder` which
was added in 0.5. The `headers`, `environ_base`, `environ_overrides`
and `charset` parameters were added.
"""
builder = EnvironBuilder(*args, **kwargs)
try:
return builder.get_environ()
finally:
builder.close()
def run_wsgi_app(app, environ, buffered=False):
"""Return a tuple in the form (app_iter, status, headers) of the
application output. This works best if you pass it an application that
returns an iterator all the time.
Sometimes applications may use the `write()` callable returned
by the `start_response` function. This tries to resolve such edge
cases automatically. But if you don't get the expected output you
should set `buffered` to `True` which enforces buffering.
If passed an invalid WSGI application the behavior of this function is
undefined. Never pass non-conforming WSGI applications to this function.
:param app: the application to execute.
:param buffered: set to `True` to enforce buffering.
:return: tuple in the form ``(app_iter, status, headers)``
"""
environ = _get_environ(environ)
response = []
buffer = []
def start_response(status, headers, exc_info=None):
if exc_info is not None:
reraise(*exc_info)
response[:] = [status, headers]
return buffer.append
app_iter = app(environ, start_response)
# when buffering we emit the close call early and convert the
# application iterator into a regular list
if buffered:
close_func = getattr(app_iter, 'close', None)
try:
app_iter = list(app_iter)
finally:
if close_func is not None:
close_func()
# otherwise we iterate the application iter until we have
# a response, chain the already received data with the already
# collected data and wrap it in a new `ClosingIterator` if
# we have a close callable.
else:
while not response:
buffer.append(next(app_iter))
if buffer:
close_func = getattr(app_iter, 'close', None)
app_iter = chain(buffer, app_iter)
if close_func is not None:
app_iter = ClosingIterator(app_iter, close_func)
return app_iter, response[0], Headers(response[1])
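# A minimal usage sketch (an illustrative assumption, not part of this module):
# drive a trivial WSGI application through the Client, create_environ and
# run_wsgi_app helpers defined above. The `_demo_app`/`_demo` names are hypothetical.
def _demo_app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'hello']
def _demo():
    from werkzeug.wrappers import BaseResponse
    client = Client(_demo_app, response_wrapper=BaseResponse)
    response = client.get('/greet?name=World')
    assert response.status_code == 200 and response.data == b'hello'
    # run_wsgi_app can also be driven directly with an environ from create_environ
    app_iter, status, headers = run_wsgi_app(_demo_app, create_environ('/greet'),
                                             buffered=True)
    assert status == '200 OK'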
|
bigmlcom/python
|
refs/heads/master
|
bigml/tests/test_17_split_dataset.py
|
2
|
# -*- coding: utf-8 -*-
#
# Copyright 2015-2021 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Splitting dataset
"""
from .world import world, setup_module, teardown_module
from . import create_source_steps as source_create
from . import create_dataset_steps as dataset_create
class TestSplitDataset(object):
def setup(self):
"""
Debug information
"""
print("\n-------------------\nTests in: %s\n" % __name__)
def teardown(self):
"""
Debug information
"""
print("\nEnd of tests in: %s\n-------------------\n" % __name__)
def test_scenario1(self):
"""
Scenario: Successfully creating a split dataset:
Given I create a data source with "<params>" uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I create a dataset extracting a <rate> sample
And I wait until the dataset is ready less than <time_3> secs
When I compare the datasets' instances
Then the proportion of instances between datasets is <rate>
Examples:
| data | time_1 | time_2 | time_3 | rate |
| ../data/iris.csv | 10 | 10 | 10 | 0.8 |
"""
print(self.test_scenario1.__doc__)
examples = [
['data/iris.csv', '10', '10', '10', '0.8', '{"category": 12}']]
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file_with_args(self, example[0], example[5])
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(self,
example[2])
dataset_create.i_create_a_split_dataset(self, example[4])
dataset_create.the_dataset_is_finished_in_less_than(self,
example[3])
dataset_create.i_compare_datasets_instances(self)
dataset_create.proportion_datasets_instances(self, example[4])
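# A minimal sketch (an assumption based on the public bigml Python bindings, not part
# of this test module) of the same split-dataset workflow driven directly through the
# API object: a second dataset is sampled from the first with a 0.8 rate, mirroring
# the scenario above.
def _split_dataset_sketch():
    from bigml.api import BigML
    api = BigML() # credentials are read from the environment
    source = api.create_source("data/iris.csv")
    api.ok(source)
    dataset = api.create_dataset(source)
    api.ok(dataset)
    sample = api.create_dataset(dataset, {"sample_rate": 0.8})
    api.ok(sample)
    return dataset, sample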
|
TathagataChakraborti/resource-conflicts
|
refs/heads/master
|
PLANROB-2015/py2.5/lib/python2.5/macurl2path.py
|
332
|
"""Macintosh-specific module for conversion between pathnames and URLs.
Do not import directly; use urllib instead."""
import urllib
import os
__all__ = ["url2pathname","pathname2url"]
def url2pathname(pathname):
"""OS-specific conversion from a relative URL of the 'file' scheme
to a file system path; not recommended for general use."""
#
# XXXX The .. handling should be fixed...
#
tp = urllib.splittype(pathname)[0]
if tp and tp != 'file':
raise RuntimeError, 'Cannot convert non-local URL to pathname'
# Turn starting /// into /, an empty hostname means current host
if pathname[:3] == '///':
pathname = pathname[2:]
elif pathname[:2] == '//':
raise RuntimeError, 'Cannot convert non-local URL to pathname'
components = pathname.split('/')
# Remove . and embedded ..
i = 0
while i < len(components):
if components[i] == '.':
del components[i]
elif components[i] == '..' and i > 0 and \
components[i-1] not in ('', '..'):
del components[i-1:i+1]
i = i-1
elif components[i] == '' and i > 0 and components[i-1] != '':
del components[i]
else:
i = i+1
if not components[0]:
# Absolute unix path, don't start with colon
rv = ':'.join(components[1:])
else:
# relative unix path, start with colon. First replace
# leading .. by empty strings (giving ::file)
i = 0
while i < len(components) and components[i] == '..':
components[i] = ''
i = i + 1
rv = ':' + ':'.join(components)
# and finally unquote slashes and other funny characters
return urllib.unquote(rv)
def pathname2url(pathname):
"""OS-specific conversion from a file system path to a relative URL
of the 'file' scheme; not recommended for general use."""
if '/' in pathname:
raise RuntimeError, "Cannot convert pathname containing slashes"
components = pathname.split(':')
# Remove empty first and/or last component
if components[0] == '':
del components[0]
if components[-1] == '':
del components[-1]
# Replace empty string ('::') by .. (will result in '/../' later)
for i in range(len(components)):
if components[i] == '':
components[i] = '..'
# Truncate names longer than 31 bytes
components = map(_pncomp2url, components)
if os.path.isabs(pathname):
return '/' + '/'.join(components)
else:
return '/'.join(components)
def _pncomp2url(component):
component = urllib.quote(component[:31], safe='') # We want to quote slashes
return component
def test():
for url in ["index.html",
"bar/index.html",
"/foo/bar/index.html",
"/foo/bar/",
"/"]:
print '%r -> %r' % (url, url2pathname(url))
for path in ["drive:",
"drive:dir:",
"drive:dir:file",
"drive:file",
"file",
":file",
":dir:",
":dir:file"]:
print '%r -> %r' % (path, pathname2url(path))
if __name__ == '__main__':
test()
|
shahankhatch/scikit-learn
|
refs/heads/master
|
examples/feature_selection/plot_permutation_test_for_classification.py
|
250
|
"""
=================================================================
Test with permutations the significance of a classification score
=================================================================
In order to test if a classification score is significant, one technique
is to repeat the classification procedure after randomizing (permuting)
the labels. The p-value is then given by the percentage of runs for
which the score obtained is greater than the classification score
obtained in the first place.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold, permutation_test_score
from sklearn import datasets
##############################################################################
# Loading a dataset
iris = datasets.load_iris()
X = iris.data
y = iris.target
n_classes = np.unique(y).size
# Some noisy data not correlated
random = np.random.RandomState(seed=0)
E = random.normal(size=(len(X), 2200))
# Add noisy data to the informative features to make the task harder
X = np.c_[X, E]
svm = SVC(kernel='linear')
cv = StratifiedKFold(y, 2)
score, permutation_scores, pvalue = permutation_test_score(
svm, X, y, scoring="accuracy", cv=cv, n_permutations=100, n_jobs=1)
print("Classification score %s (pvalue : %s)" % (score, pvalue))
###############################################################################
# View histogram of permutation scores
plt.hist(permutation_scores, 20, label='Permutation scores')
ylim = plt.ylim()
# BUG: vlines(..., linestyle='--') fails on older versions of matplotlib
#plt.vlines(score, ylim[0], ylim[1], linestyle='--',
# color='g', linewidth=3, label='Classification Score'
# ' (pvalue %s)' % pvalue)
#plt.vlines(1.0 / n_classes, ylim[0], ylim[1], linestyle='--',
# color='k', linewidth=3, label='Luck')
plt.plot(2 * [score], ylim, '--g', linewidth=3,
label='Classification Score'
' (pvalue %s)' % pvalue)
plt.plot(2 * [1. / n_classes], ylim, '--k', linewidth=3, label='Luck')
plt.ylim(ylim)
plt.legend()
plt.xlabel('Score')
plt.show()
|
impowski/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/XMLHttpRequest/resources/gzip.py
|
243
|
import gzip as gzip_module
from cStringIO import StringIO
def main(request, response):
if "content" in request.GET:
output = request.GET["content"]
else:
output = request.body
out = StringIO()
with gzip_module.GzipFile(fileobj=out, mode="w") as f:
f.write(output)
output = out.getvalue()
headers = [("Content-type", "text/plain"),
("Content-Encoding", "gzip"),
("X-Request-Method", request.method),
("X-Request-Query", request.url_parts.query if request.url_parts.query else "NO"),
("X-Request-Content-Length", request.headers.get("Content-Length", "NO")),
("X-Request-Content-Type", request.headers.get("Content-Type", "NO")),
("Content-Length", len(output))]
return headers, output
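# A quick round-trip sketch (an assumption, not used by the handler above): the
# gzipped payload returned by main() can be decompressed back to the original body.
def _gunzip(payload):
    return gzip_module.GzipFile(fileobj=StringIO(payload), mode="r").read()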
|
c0defreak/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Doc/includes/sqlite3/execsql_fetchonerow.py
|
96
|
import sqlite3
con = sqlite3.connect("mydb")
cur = con.cursor()
SELECT = "select name_last, age from people order by age, name_last"
# 1. Iterate over the rows available from the cursor, unpacking the
# resulting sequences to yield their elements (name_last, age):
cur.execute(SELECT)
for (name_last, age) in cur:
print('%s is %d years old.' % (name_last, age))
# 2. Equivalently:
cur.execute(SELECT)
for row in cur:
print('%s is %d years old.' % (row[0], row[1]))
|
mapr/hue
|
refs/heads/hue-3.9.0-mapr
|
desktop/core/ext-py/Django-1.6.10/django/conf/locale/nl/formats.py
|
141
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y' # '20 januari 2009'
TIME_FORMAT = 'H:i' # '15:23'
DATETIME_FORMAT = 'j F Y H:i' # '20 januari 2009 15:23'
YEAR_MONTH_FORMAT = 'F Y' # 'januari 2009'
MONTH_DAY_FORMAT = 'j F' # '20 januari'
SHORT_DATE_FORMAT = 'j-n-Y' # '20-1-2009'
SHORT_DATETIME_FORMAT = 'j-n-Y H:i' # '20-1-2009 15:23'
FIRST_DAY_OF_WEEK = 1 # Monday (in Dutch 'maandag')
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d-%m-%Y', '%d-%m-%y', # '20-01-2009', '20-01-09'
# '%d %b %Y', '%d %b %y', # '20 jan 2009', '20 jan 09'
# '%d %B %Y', '%d %B %y', # '20 januari 2009', '20 januari 09'
)
# Kept ISO formats as one is in first position
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '15:23:35'
'%H:%M:%S.%f', # '15:23:35.000200'
'%H.%M:%S', # '15.23:35'
'%H.%M:%S.%f', # '15.23:35.000200'
'%H.%M', # '15.23'
'%H:%M', # '15:23'
)
DATETIME_INPUT_FORMATS = (
# With time in %H:%M:%S :
'%d-%m-%Y %H:%M:%S', '%d-%m-%y %H:%M:%S', '%Y-%m-%d %H:%M:%S', # '20-01-2009 15:23:35', '20-01-09 15:23:35', '2009-01-20 15:23:35'
# '%d %b %Y %H:%M:%S', '%d %b %y %H:%M:%S', # '20 jan 2009 15:23:35', '20 jan 09 15:23:35'
# '%d %B %Y %H:%M:%S', '%d %B %y %H:%M:%S', # '20 januari 2009 15:23:35', '20 januari 2009 15:23:35'
# With time in %H:%M:%S.%f :
'%d-%m-%Y %H:%M:%S.%f', '%d-%m-%y %H:%M:%S.%f', '%Y-%m-%d %H:%M:%S.%f', # '20-01-2009 15:23:35.000200', '20-01-09 15:23:35.000200', '2009-01-20 15:23:35.000200'
# With time in %H.%M:%S :
'%d-%m-%Y %H.%M:%S', '%d-%m-%y %H.%M:%S', # '20-01-2009 15.23:35', '20-01-09 15.23:35'
# '%d %b %Y %H.%M:%S', '%d %b %y %H.%M:%S', # '20 jan 2009 15.23:35', '20 jan 09 15.23:35'
# '%d %B %Y %H.%M:%S', '%d %B %y %H.%M:%S', # '20 januari 2009 15.23:35', '20 januari 2009 15.23:35'
# With time in %H.%M:%S.%f :
'%d-%m-%Y %H.%M:%S.%f', '%d-%m-%y %H.%M:%S.%f', # '20-01-2009 15.23:35.000200', '20-01-09 15.23:35.000200'
# With time in %H:%M :
'%d-%m-%Y %H:%M', '%d-%m-%y %H:%M', '%Y-%m-%d %H:%M', # '20-01-2009 15:23', '20-01-09 15:23', '2009-01-20 15:23'
# '%d %b %Y %H:%M', '%d %b %y %H:%M', # '20 jan 2009 15:23', '20 jan 09 15:23'
# '%d %B %Y %H:%M', '%d %B %y %H:%M', # '20 januari 2009 15:23', '20 januari 2009 15:23'
# With time in %H.%M :
'%d-%m-%Y %H.%M', '%d-%m-%y %H.%M', # '20-01-2009 15.23', '20-01-09 15.23'
# '%d %b %Y %H.%M', '%d %b %y %H.%M', # '20 jan 2009 15.23', '20 jan 09 15.23'
# '%d %B %Y %H.%M', '%d %B %y %H.%M', # '20 januari 2009 15.23', '20 januari 2009 15.23'
# Without time :
'%d-%m-%Y', '%d-%m-%y', '%Y-%m-%d', # '20-01-2009', '20-01-09', '2009-01-20'
# '%d %b %Y', '%d %b %y', # '20 jan 2009', '20 jan 09'
# '%d %B %Y', '%d %B %y', # '20 januari 2009', '20 januari 2009'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
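# Illustration (comment only, not part of Django's locale data): the first entry of
# DATE_INPUT_FORMATS accepts the Dutch day-month-year ordering, e.g.
# datetime.strptime('20-01-2009', DATE_INPUT_FORMATS[0]).date() -> date(2009, 1, 20)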
|
a-parhom/edx-platform
|
refs/heads/master
|
openedx/core/djangoapps/lang_pref/middleware.py
|
12
|
"""
Middleware for Language Preferences
"""
from django.conf import settings
from django.utils.translation import LANGUAGE_SESSION_KEY
from django.utils.translation.trans_real import parse_accept_lang_header
from openedx.core.djangoapps.lang_pref import COOKIE_DURATION, LANGUAGE_HEADER, LANGUAGE_KEY
from openedx.core.djangoapps.user_api.errors import UserAPIInternalError, UserAPIRequestError
from openedx.core.djangoapps.user_api.preferences.api import (
get_user_preference,
set_user_preference
)
class LanguagePreferenceMiddleware(object):
"""
Middleware for user preferences.
Ensures that, once set, a user's preferences are reflected in the page
whenever they are logged in.
"""
def process_request(self, request):
"""
If a user's UserPreference contains a language preference, use the user's preference.
Save the current language preference cookie as the user's preferred language.
"""
cookie_lang = request.COOKIES.get(settings.LANGUAGE_COOKIE, None)
if cookie_lang:
if request.user.is_authenticated:
set_user_preference(request.user, LANGUAGE_KEY, cookie_lang)
else:
request._anonymous_user_cookie_lang = cookie_lang
accept_header = request.META.get(LANGUAGE_HEADER, None)
if accept_header:
current_langs = parse_accept_lang_header(accept_header)
# Promote the cookie_lang over any language currently in the accept header
current_langs = [(lang, qvalue) for (lang, qvalue) in current_langs if lang != cookie_lang]
current_langs.insert(0, (cookie_lang, 1))
accept_header = ",".join("{};q={}".format(lang, qvalue) for (lang, qvalue) in current_langs)
else:
accept_header = cookie_lang
request.META[LANGUAGE_HEADER] = accept_header
# Allow the new cookie setting to update the language in the user's session
if LANGUAGE_SESSION_KEY in request.session and request.session[LANGUAGE_SESSION_KEY] != cookie_lang:
del request.session[LANGUAGE_SESSION_KEY]
def process_response(self, request, response):
# If the user is logged in, check for their language preference. Also check for real user
        # if current user is a masquerading user.
user_pref = None
current_user = None
if hasattr(request, 'user'):
current_user = getattr(request.user, 'real_user', request.user)
if current_user and current_user.is_authenticated:
anonymous_cookie_lang = getattr(request, '_anonymous_user_cookie_lang', None)
if anonymous_cookie_lang:
user_pref = anonymous_cookie_lang
set_user_preference(current_user, LANGUAGE_KEY, anonymous_cookie_lang)
else:
# Get the user's language preference
try:
user_pref = get_user_preference(current_user, LANGUAGE_KEY)
except (UserAPIRequestError, UserAPIInternalError):
# If we can't find the user preferences, then don't modify the cookie
pass
# If set, set the user_pref in the LANGUAGE_COOKIE
if user_pref:
response.set_cookie(
settings.LANGUAGE_COOKIE,
value=user_pref,
domain=settings.SESSION_COOKIE_DOMAIN,
max_age=COOKIE_DURATION,
)
else:
response.delete_cookie(
settings.LANGUAGE_COOKIE,
domain=settings.SESSION_COOKIE_DOMAIN
)
return response
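# A standalone sketch (illustrative only; the middleware above does this inline in
# process_request) of how the cookie language is promoted to the front of an
# Accept-Language header. `accept_langs` is a list of (language, qvalue) pairs such as
# the one returned by parse_accept_lang_header.
def _promote_cookie_lang(accept_langs, cookie_lang):
    remaining = [(lang, qvalue) for (lang, qvalue) in accept_langs if lang != cookie_lang]
    ordered = [(cookie_lang, 1)] + remaining
    return ",".join("{};q={}".format(lang, qvalue) for (lang, qvalue) in ordered)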
|
KhalidGit/flask
|
refs/heads/master
|
Work/TriviaMVA/TriviaMVA/env/Lib/site-packages/werkzeug/contrib/lint.py
|
295
|
# -*- coding: utf-8 -*-
"""
werkzeug.contrib.lint
~~~~~~~~~~~~~~~~~~~~~
.. versionadded:: 0.5
This module provides a middleware that performs sanity checks of the WSGI
application. It checks that :pep:`333` is properly implemented and warns
on some common HTTP errors such as non-empty responses for 304 status
codes.
This module provides a middleware, the :class:`LintMiddleware`. Wrap your
application with it and it will warn about common problems with WSGI and
HTTP while your application is running.
It's strongly recommended to use it during development.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from urlparse import urlparse
from warnings import warn
from werkzeug.datastructures import Headers
from werkzeug.http import is_entity_header
from werkzeug.wsgi import FileWrapper
from werkzeug._compat import string_types
class WSGIWarning(Warning):
"""Warning class for WSGI warnings."""
class HTTPWarning(Warning):
"""Warning class for HTTP warnings."""
def check_string(context, obj, stacklevel=3):
if type(obj) is not str:
warn(WSGIWarning('%s requires bytestrings, got %s' %
(context, obj.__class__.__name__)))
class InputStream(object):
def __init__(self, stream):
self._stream = stream
def read(self, *args):
if len(args) == 0:
warn(WSGIWarning('wsgi does not guarantee an EOF marker on the '
'input stream, thus making calls to '
'wsgi.input.read() unsafe. Conforming servers '
'may never return from this call.'),
stacklevel=2)
elif len(args) != 1:
warn(WSGIWarning('too many parameters passed to wsgi.input.read()'),
stacklevel=2)
return self._stream.read(*args)
def readline(self, *args):
if len(args) == 0:
warn(WSGIWarning('Calls to wsgi.input.readline() without arguments'
' are unsafe. Use wsgi.input.read() instead.'),
stacklevel=2)
elif len(args) == 1:
warn(WSGIWarning('wsgi.input.readline() was called with a size hint. '
'WSGI does not support this, although it\'s available '
'on all major servers.'),
stacklevel=2)
else:
raise TypeError('too many arguments passed to wsgi.input.readline()')
return self._stream.readline(*args)
def __iter__(self):
try:
return iter(self._stream)
except TypeError:
warn(WSGIWarning('wsgi.input is not iterable.'), stacklevel=2)
return iter(())
def close(self):
warn(WSGIWarning('application closed the input stream!'),
stacklevel=2)
self._stream.close()
class ErrorStream(object):
def __init__(self, stream):
self._stream = stream
def write(self, s):
check_string('wsgi.error.write()', s)
self._stream.write(s)
def flush(self):
self._stream.flush()
def writelines(self, seq):
for line in seq:
            self.write(line)
def close(self):
warn(WSGIWarning('application closed the error stream!'),
stacklevel=2)
self._stream.close()
class GuardedWrite(object):
def __init__(self, write, chunks):
self._write = write
self._chunks = chunks
def __call__(self, s):
check_string('write()', s)
        self._write(s)
self._chunks.append(len(s))
class GuardedIterator(object):
def __init__(self, iterator, headers_set, chunks):
self._iterator = iterator
self._next = iter(iterator).next
self.closed = False
self.headers_set = headers_set
self.chunks = chunks
def __iter__(self):
return self
def next(self):
if self.closed:
warn(WSGIWarning('iterated over closed app_iter'),
stacklevel=2)
rv = self._next()
if not self.headers_set:
warn(WSGIWarning('Application returned before it '
'started the response'), stacklevel=2)
check_string('application iterator items', rv)
self.chunks.append(len(rv))
return rv
def close(self):
self.closed = True
if hasattr(self._iterator, 'close'):
self._iterator.close()
if self.headers_set:
status_code, headers = self.headers_set
bytes_sent = sum(self.chunks)
content_length = headers.get('content-length', type=int)
if status_code == 304:
for key, value in headers:
key = key.lower()
if key not in ('expires', 'content-location') and \
is_entity_header(key):
warn(HTTPWarning('entity header %r found in 304 '
'response' % key))
if bytes_sent:
warn(HTTPWarning('304 responses must not have a body'))
elif 100 <= status_code < 200 or status_code == 204:
if content_length != 0:
                    warn(HTTPWarning('%r responses must have an empty '
                                     'content length' % status_code))
if bytes_sent:
warn(HTTPWarning('%r responses must not have a body' %
status_code))
elif content_length is not None and content_length != bytes_sent:
warn(WSGIWarning('Content-Length and the number of bytes '
'sent to the client do not match.'))
def __del__(self):
if not self.closed:
try:
warn(WSGIWarning('Iterator was garbage collected before '
'it was closed.'))
except Exception:
pass
class LintMiddleware(object):
"""This middleware wraps an application and warns on common errors.
    Among other things it currently checks for the following problems:
- invalid status codes
- non-bytestrings sent to the WSGI server
- strings returned from the WSGI application
- non-empty conditional responses
- unquoted etags
- relative URLs in the Location header
- unsafe calls to wsgi.input
- unclosed iterators
Detected errors are emitted using the standard Python :mod:`warnings`
system and usually end up on :data:`stderr`.
::
from werkzeug.contrib.lint import LintMiddleware
app = LintMiddleware(app)
:param app: the application to wrap
"""
def __init__(self, app):
self.app = app
def check_environ(self, environ):
if type(environ) is not dict:
warn(WSGIWarning('WSGI environment is not a standard python dict.'),
stacklevel=4)
for key in ('REQUEST_METHOD', 'SERVER_NAME', 'SERVER_PORT',
'wsgi.version', 'wsgi.input', 'wsgi.errors',
'wsgi.multithread', 'wsgi.multiprocess',
'wsgi.run_once'):
if key not in environ:
warn(WSGIWarning('required environment key %r not found'
% key), stacklevel=3)
if environ['wsgi.version'] != (1, 0):
warn(WSGIWarning('environ is not a WSGI 1.0 environ'),
stacklevel=3)
script_name = environ.get('SCRIPT_NAME', '')
if script_name and script_name[:1] != '/':
warn(WSGIWarning('SCRIPT_NAME does not start with a slash: %r'
% script_name), stacklevel=3)
path_info = environ.get('PATH_INFO', '')
if path_info[:1] != '/':
warn(WSGIWarning('PATH_INFO does not start with a slash: %r'
% path_info), stacklevel=3)
def check_start_response(self, status, headers, exc_info):
check_string('status', status)
status_code = status.split(None, 1)[0]
if len(status_code) != 3 or not status_code.isdigit():
warn(WSGIWarning('Status code must be three digits'), stacklevel=3)
if len(status) < 4 or status[3] != ' ':
            warn(WSGIWarning('Invalid value for status %r. Valid '
                             'status strings are three digits, a space '
                             'and a status explanation' % status), stacklevel=3)
status_code = int(status_code)
if status_code < 100:
warn(WSGIWarning('status code < 100 detected'), stacklevel=3)
if type(headers) is not list:
warn(WSGIWarning('header list is not a list'), stacklevel=3)
for item in headers:
if type(item) is not tuple or len(item) != 2:
                warn(WSGIWarning('Headers must be 2-item tuples'),
stacklevel=3)
name, value = item
if type(name) is not str or type(value) is not str:
warn(WSGIWarning('header items must be strings'),
stacklevel=3)
if name.lower() == 'status':
warn(WSGIWarning('The status header is not supported due to '
'conflicts with the CGI spec.'),
stacklevel=3)
if exc_info is not None and not isinstance(exc_info, tuple):
warn(WSGIWarning('invalid value for exc_info'), stacklevel=3)
headers = Headers(headers)
self.check_headers(headers)
return status_code, headers
def check_headers(self, headers):
etag = headers.get('etag')
if etag is not None:
if etag.startswith('w/'):
etag = etag[2:]
if not (etag[:1] == etag[-1:] == '"'):
warn(HTTPWarning('unquoted etag emitted.'), stacklevel=4)
location = headers.get('location')
if location is not None:
if not urlparse(location).netloc:
warn(HTTPWarning('absolute URLs required for location header'),
stacklevel=4)
def check_iterator(self, app_iter):
if isinstance(app_iter, string_types):
warn(WSGIWarning('application returned string. Response will '
'send character for character to the client '
'which will kill the performance. Return a '
'list or iterable instead.'), stacklevel=3)
def __call__(self, *args, **kwargs):
if len(args) != 2:
warn(WSGIWarning('Two arguments to WSGI app required'), stacklevel=2)
if kwargs:
warn(WSGIWarning('No keyword arguments to WSGI app allowed'),
stacklevel=2)
environ, start_response = args
self.check_environ(environ)
environ['wsgi.input'] = InputStream(environ['wsgi.input'])
environ['wsgi.errors'] = ErrorStream(environ['wsgi.errors'])
# hook our own file wrapper in so that applications will always
# iterate to the end and we can check the content length
environ['wsgi.file_wrapper'] = FileWrapper
headers_set = []
chunks = []
def checking_start_response(*args, **kwargs):
if len(args) not in (2, 3):
                warn(WSGIWarning('Invalid number of arguments: %s, expected '
                                 '2 or 3' % len(args)), stacklevel=2)
if kwargs:
warn(WSGIWarning('no keyword arguments allowed.'))
status, headers = args[:2]
if len(args) == 3:
exc_info = args[2]
else:
exc_info = None
headers_set[:] = self.check_start_response(status, headers,
exc_info)
return GuardedWrite(start_response(status, headers, exc_info),
chunks)
app_iter = self.app(environ, checking_start_response)
self.check_iterator(app_iter)
return GuardedIterator(app_iter, headers_set, chunks)
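# A minimal wiring sketch (an assumption, not part of this module): wrap a trivial
# WSGI app with LintMiddleware and push one request through it; any detected problems
# are reported through the standard warnings module.
def _lint_demo():
    from werkzeug.test import create_environ, run_wsgi_app
    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['hello']
    app_iter, status, headers = run_wsgi_app(LintMiddleware(app), create_environ('/'),
                                             buffered=True)
    return status, list(app_iter)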
|
gtcno/microservices-infrastructure
|
refs/heads/master
|
plugins/callbacks/profile_tasks.py
|
41
|
# The MIT License (MIT)
#
# Copyright (c) 2014 Jharrod LaFon
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import time
class CallbackModule(object):
"""
A plugin for timing tasks
"""
def __init__(self):
self.stats = {}
self.current = None
def playbook_on_task_start(self, name, is_conditional):
"""
Logs the start of each task
"""
if self.current is not None:
# Record the running time of the last executed task
self.stats[self.current] = time.time() - self.stats[self.current]
# Record the start time of the current task
self.current = name
self.stats[self.current] = time.time()
def playbook_on_stats(self, stats):
"""
Prints the timings
"""
# Record the timing of the very last task
if self.current is not None:
self.stats[self.current] = time.time() - self.stats[self.current]
# Sort the tasks by their running time
results = sorted(
self.stats.items(),
key=lambda value: value[1],
reverse=True,
)
# Just keep the top 10
results = results[:10]
# Print the timings
for name, elapsed in results:
print(
"{0:-<70}{1:->9}".format(
'{0} '.format(name),
' {0:.02f}s'.format(elapsed),
)
)
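# A tiny standalone illustration (not part of the plugin) of the bookkeeping used
# above: each task name first maps to its start timestamp, and that slot is
# overwritten with the elapsed time once the next task begins (or when stats are
# printed).
def _timing_demo():
    stats, current = {}, None
    for name in ('install packages', 'restart service'):
        if current is not None:
            stats[current] = time.time() - stats[current]
        current = name
        stats[current] = time.time()
    stats[current] = time.time() - stats[current]
    return stats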
|
ff94315/hiwifi-openwrt-HC5661-HC5761
|
refs/heads/master
|
staging_dir/target-mipsel_r2_uClibc-0.9.33.2/root-ralink/usr/lib/python2.7/unittest/test/support.py
|
153
|
import unittest
class TestHashing(object):
"""Used as a mixin for TestCase"""
# Check for a valid __hash__ implementation
def test_hash(self):
for obj_1, obj_2 in self.eq_pairs:
try:
if not hash(obj_1) == hash(obj_2):
self.fail("%r and %r do not hash equal" % (obj_1, obj_2))
except KeyboardInterrupt:
raise
except Exception, e:
self.fail("Problem hashing %r and %r: %s" % (obj_1, obj_2, e))
for obj_1, obj_2 in self.ne_pairs:
try:
if hash(obj_1) == hash(obj_2):
self.fail("%s and %s hash equal, but shouldn't" %
(obj_1, obj_2))
except KeyboardInterrupt:
raise
except Exception, e:
self.fail("Problem hashing %s and %s: %s" % (obj_1, obj_2, e))
class TestEquality(object):
"""Used as a mixin for TestCase"""
# Check for a valid __eq__ implementation
def test_eq(self):
for obj_1, obj_2 in self.eq_pairs:
self.assertEqual(obj_1, obj_2)
self.assertEqual(obj_2, obj_1)
# Check for a valid __ne__ implementation
def test_ne(self):
for obj_1, obj_2 in self.ne_pairs:
self.assertNotEqual(obj_1, obj_2)
self.assertNotEqual(obj_2, obj_1)
class LoggingResult(unittest.TestResult):
def __init__(self, log):
self._events = log
super(LoggingResult, self).__init__()
def startTest(self, test):
self._events.append('startTest')
super(LoggingResult, self).startTest(test)
def startTestRun(self):
self._events.append('startTestRun')
super(LoggingResult, self).startTestRun()
def stopTest(self, test):
self._events.append('stopTest')
super(LoggingResult, self).stopTest(test)
def stopTestRun(self):
self._events.append('stopTestRun')
super(LoggingResult, self).stopTestRun()
def addFailure(self, *args):
self._events.append('addFailure')
super(LoggingResult, self).addFailure(*args)
def addSuccess(self, *args):
self._events.append('addSuccess')
super(LoggingResult, self).addSuccess(*args)
def addError(self, *args):
self._events.append('addError')
super(LoggingResult, self).addError(*args)
def addSkip(self, *args):
self._events.append('addSkip')
super(LoggingResult, self).addSkip(*args)
def addExpectedFailure(self, *args):
self._events.append('addExpectedFailure')
super(LoggingResult, self).addExpectedFailure(*args)
def addUnexpectedSuccess(self, *args):
self._events.append('addUnexpectedSuccess')
super(LoggingResult, self).addUnexpectedSuccess(*args)
class ResultWithNoStartTestRunStopTestRun(object):
"""An object honouring TestResult before startTestRun/stopTestRun."""
def __init__(self):
self.failures = []
self.errors = []
self.testsRun = 0
self.skipped = []
self.expectedFailures = []
self.unexpectedSuccesses = []
self.shouldStop = False
def startTest(self, test):
pass
def stopTest(self, test):
pass
def addError(self, test):
pass
def addFailure(self, test):
pass
def addSuccess(self, test):
pass
def wasSuccessful(self):
return True
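# A minimal sketch (an assumption, not part of this support module) of how the
# equality/hashing mixins above are used: the concrete TestCase only has to supply
# eq_pairs and ne_pairs.
class _PointEqualityTest(TestEquality, TestHashing, unittest.TestCase):
    eq_pairs = [((1, 2), (1, 2))]
    ne_pairs = [((1, 2), (2, 1))]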
|
gyyu/cmu-debate
|
refs/heads/master
|
node_modules/node-gyp/gyp/gyptest.py
|
1752
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
__doc__ = """
gyptest.py -- test runner for GYP tests.
"""
import os
import optparse
import shlex
import subprocess
import sys
class CommandRunner(object):
"""
Executor class for commands, including "commands" implemented by
Python functions.
"""
verbose = True
active = True
def __init__(self, dictionary={}):
self.subst_dictionary(dictionary)
def subst_dictionary(self, dictionary):
self._subst_dictionary = dictionary
def subst(self, string, dictionary=None):
"""
Substitutes (via the format operator) the values in the specified
dictionary into the specified command.
The command can be an (action, string) tuple. In all cases, we
perform substitution on strings and don't worry if something isn't
a string. (It's probably a Python function to be executed.)
"""
if dictionary is None:
dictionary = self._subst_dictionary
if dictionary:
try:
string = string % dictionary
except TypeError:
pass
return string
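  # Illustrative example (comment only): with dictionary {'mode': 'ninja'},
  # subst('build with %(mode)s') returns 'build with ninja'; values that are not
  # strings (for example Python callables) pass through unchanged.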
def display(self, command, stdout=None, stderr=None):
if not self.verbose:
return
if type(command) == type(()):
func = command[0]
args = command[1:]
s = '%s(%s)' % (func.__name__, ', '.join(map(repr, args)))
if type(command) == type([]):
# TODO: quote arguments containing spaces
# TODO: handle meta characters?
s = ' '.join(command)
else:
s = self.subst(command)
if not s.endswith('\n'):
s += '\n'
sys.stdout.write(s)
sys.stdout.flush()
def execute(self, command, stdout=None, stderr=None):
"""
Executes a single command.
"""
if not self.active:
return 0
if type(command) == type(''):
command = self.subst(command)
cmdargs = shlex.split(command)
if cmdargs[0] == 'cd':
command = (os.chdir,) + tuple(cmdargs[1:])
if type(command) == type(()):
func = command[0]
args = command[1:]
return func(*args)
else:
if stdout is sys.stdout:
# Same as passing sys.stdout, except python2.4 doesn't fail on it.
subout = None
else:
# Open pipe for anything else so Popen works on python2.4.
subout = subprocess.PIPE
if stderr is sys.stderr:
# Same as passing sys.stderr, except python2.4 doesn't fail on it.
suberr = None
elif stderr is None:
# Merge with stdout if stderr isn't specified.
suberr = subprocess.STDOUT
else:
# Open pipe for anything else so Popen works on python2.4.
suberr = subprocess.PIPE
p = subprocess.Popen(command,
shell=(sys.platform == 'win32'),
stdout=subout,
stderr=suberr)
p.wait()
if stdout is None:
self.stdout = p.stdout.read()
elif stdout is not sys.stdout:
stdout.write(p.stdout.read())
if stderr not in (None, sys.stderr):
stderr.write(p.stderr.read())
return p.returncode
def run(self, command, display=None, stdout=None, stderr=None):
"""
Runs a single command, displaying it first.
"""
if display is None:
display = command
self.display(display)
return self.execute(command, stdout, stderr)
class Unbuffered(object):
def __init__(self, fp):
self.fp = fp
def write(self, arg):
self.fp.write(arg)
self.fp.flush()
def __getattr__(self, attr):
return getattr(self.fp, attr)
sys.stdout = Unbuffered(sys.stdout)
sys.stderr = Unbuffered(sys.stderr)
def is_test_name(f):
return f.startswith('gyptest') and f.endswith('.py')
def find_all_gyptest_files(directory):
result = []
for root, dirs, files in os.walk(directory):
if '.svn' in dirs:
dirs.remove('.svn')
result.extend([ os.path.join(root, f) for f in files if is_test_name(f) ])
result.sort()
return result
def main(argv=None):
if argv is None:
argv = sys.argv
usage = "gyptest.py [-ahlnq] [-f formats] [test ...]"
parser = optparse.OptionParser(usage=usage)
parser.add_option("-a", "--all", action="store_true",
help="run all tests")
parser.add_option("-C", "--chdir", action="store", default=None,
help="chdir to the specified directory")
parser.add_option("-f", "--format", action="store", default='',
help="run tests with the specified formats")
parser.add_option("-G", '--gyp_option', action="append", default=[],
help="Add -G options to the gyp command line")
parser.add_option("-l", "--list", action="store_true",
help="list available tests and exit")
parser.add_option("-n", "--no-exec", action="store_true",
help="no execute, just print the command line")
parser.add_option("--passed", action="store_true",
help="report passed tests")
parser.add_option("--path", action="append", default=[],
help="additional $PATH directory")
parser.add_option("-q", "--quiet", action="store_true",
help="quiet, don't print test command lines")
opts, args = parser.parse_args(argv[1:])
if opts.chdir:
os.chdir(opts.chdir)
if opts.path:
extra_path = [os.path.abspath(p) for p in opts.path]
extra_path = os.pathsep.join(extra_path)
os.environ['PATH'] = extra_path + os.pathsep + os.environ['PATH']
if not args:
if not opts.all:
sys.stderr.write('Specify -a to get all tests.\n')
return 1
args = ['test']
tests = []
for arg in args:
if os.path.isdir(arg):
tests.extend(find_all_gyptest_files(os.path.normpath(arg)))
else:
if not is_test_name(os.path.basename(arg)):
print >>sys.stderr, arg, 'is not a valid gyp test name.'
sys.exit(1)
tests.append(arg)
if opts.list:
for test in tests:
print test
sys.exit(0)
CommandRunner.verbose = not opts.quiet
CommandRunner.active = not opts.no_exec
cr = CommandRunner()
os.environ['PYTHONPATH'] = os.path.abspath('test/lib')
if not opts.quiet:
sys.stdout.write('PYTHONPATH=%s\n' % os.environ['PYTHONPATH'])
passed = []
failed = []
no_result = []
if opts.format:
format_list = opts.format.split(',')
else:
# TODO: not duplicate this mapping from pylib/gyp/__init__.py
format_list = {
'aix5': ['make'],
'freebsd7': ['make'],
'freebsd8': ['make'],
'openbsd5': ['make'],
'cygwin': ['msvs'],
'win32': ['msvs', 'ninja'],
'linux2': ['make', 'ninja'],
'linux3': ['make', 'ninja'],
'darwin': ['make', 'ninja', 'xcode', 'xcode-ninja'],
}[sys.platform]
for format in format_list:
os.environ['TESTGYP_FORMAT'] = format
if not opts.quiet:
sys.stdout.write('TESTGYP_FORMAT=%s\n' % format)
gyp_options = []
for option in opts.gyp_option:
gyp_options += ['-G', option]
if gyp_options and not opts.quiet:
sys.stdout.write('Extra Gyp options: %s\n' % gyp_options)
for test in tests:
status = cr.run([sys.executable, test] + gyp_options,
stdout=sys.stdout,
stderr=sys.stderr)
if status == 2:
no_result.append(test)
elif status:
failed.append(test)
else:
passed.append(test)
if not opts.quiet:
def report(description, tests):
if tests:
if len(tests) == 1:
sys.stdout.write("\n%s the following test:\n" % description)
else:
fmt = "\n%s the following %d tests:\n"
sys.stdout.write(fmt % (description, len(tests)))
sys.stdout.write("\t" + "\n\t".join(tests) + "\n")
if opts.passed:
report("Passed", passed)
report("Failed", failed)
report("No result from", no_result)
if failed:
return 1
else:
return 0
if __name__ == "__main__":
sys.exit(main())
|
pitrou/numba
|
refs/heads/master
|
numba/cuda/tests/cudapy/test_vectorize_device.py
|
4
|
from __future__ import absolute_import, print_function, division
from numba import vectorize
from numba import cuda, float32
import numpy as np
from numba import unittest_support as unittest
from numba.cuda.testing import skip_on_cudasim
@skip_on_cudasim('ufunc API unsupported in the simulator')
class TestCudaVectorizeDeviceCall(unittest.TestCase):
def test_cuda_vectorize_device_call(self):
@cuda.jit(float32(float32, float32, float32), device=True)
def cu_device_fn(x, y, z):
return x ** y / z
def cu_ufunc(x, y, z):
return cu_device_fn(x, y, z)
ufunc = vectorize([float32(float32, float32, float32)], target='cuda')(
cu_ufunc)
N = 100
X = np.array(np.random.sample(N), dtype=np.float32)
Y = np.array(np.random.sample(N), dtype=np.float32)
Z = np.array(np.random.sample(N), dtype=np.float32) + 0.1
out = ufunc(X, Y, Z)
gold = (X ** Y) / Z
self.assertTrue(np.allclose(out, gold))
if __name__ == '__main__':
unittest.main()
|
automatthias/aubio
|
refs/heads/master
|
python/tests/test_peakpicker.py
|
17
|
#! /usr/bin/env python
from numpy.testing import TestCase, assert_equal, assert_almost_equal
from aubio import peakpicker, fvec
class aubio_peakpicker(TestCase):
def test_members(self):
o = peakpicker()
def test_peakpicker_zeroes(self):
o = peakpicker()
assert_equal(o.get_thresholded_input(), 0.)
def test_peakpick_set_threshold(self):
o = peakpicker()
new_threshold = threshold
o.set_threshold(new_threshold)
assert_almost_equal(new_threshold, o.get_threshold())
def test_peakpicker_get_threshold(self):
o = peakpicker()
new_threshold = o.get_threshold()
o.set_threshold(new_threshold)
assert_equal(new_threshold, o.get_threshold())
buf_size = 1024
slice_size = 5
delay = 1
threshold = .9
class aubio_peakpicker_peaks(TestCase):
def setUp(self):
self.o = peakpicker()
self.o.set_threshold (threshold)
self.vec = fvec(buf_size)
def test_peakpicker_impulse(self):
vec = self.vec; o = self.o
a = 345
vec[a] = 1000.
self.peaks = [a]
def test_peakpicker_ramp_up(self):
vec = self.vec; o = self.o
a = 345
vec[a] = 1000. / 4. * 1.
vec[a+1] = 1000. / 4. * 2.
vec[a+2] = 1000. / 4. * 3.
vec[a+3] = 1000.
self.peaks = [a+1]
def test_peakpicker_ramp_down(self):
vec = self.vec; o = self.o
a = 345
vec[a] = 1000.
vec[a+1] = 1000. / 4. * 3.
vec[a+2] = 1000. / 4. * 2.
vec[a+3] = 1000. / 4. * 1.
self.peaks = [a]
def test_peakpicker_plateau(self):
vec = self.vec; o = self.o
a = 345
vec[a] = 1000. / 2
vec[a+1] = 1000.
vec[a+2] = 1000.
vec[a+3] = 1000.
vec[a+4] = 1000. / 2
self.peaks = [a+1]
def test_peakpicker_consecutive_peaks(self):
vec = self.vec; o = self.o
a = 345
vec[a] = 1000. / 2
vec[a+1] = 1000.
vec[a+3] = 1000.
vec[a+4] = 1000. / 2
self.peaks = [a]
def test_peakpicker_distant_peaks(self):
vec = self.vec; o = self.o
a = 345
vec[a] = 1000.
vec[a+7] = 1000.
self.peaks = [a, a+7]
def test_peakpicker_very_distant_peaks(self):
vec = self.vec; o = self.o
a = 345
vec[a] = 1000.
vec[a+67] = 1000.
self.peaks = [a, a+67]
def tearDown(self):
fpeaks = []
for index in range(0,buf_size-slice_size):
sliced = self.vec[index:index+slice_size]
findex = self.o(sliced)
if findex:
# we found a peak
fpeak = index - findex - delay
#print self.peaks, index, '-', findex, '-', delay, '=', fpeak
if not round(index - findex - delay) in self.peaks:
self.fail('missing peak ' + str(fpeak))
fpeaks.append(fpeak)
if len(fpeaks) != len(self.peaks):
self.fail('some peaks of ' + str(self.peaks) + 'were not found, got only ' + str(fpeaks))
#print
#print fpeaks, self.peaks
if __name__ == '__main__':
from unittest import main
main()
|
mvpoland/django-smsgateway
|
refs/heads/master
|
smsgateway/smpplib/client.py
|
1
|
#
# smpplib -- SMPP Library for Python
# Copyright (c) 2005 Martynas Jocius <mjoc@akl.lt>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
#
# Modified by Yusuf Kaka <yusufk at gmail>
# Added support for Optional TLV's
"""SMPP client module"""
from logging import getLogger
from socket import socket, AF_INET, SOCK_STREAM, error as socket_error, timeout
from struct import unpack, error as struct_error
from binascii import b2a_hex
from smsgateway.smpplib.smpp import make_pdu, parse_pdu
from smsgateway.smpplib.pdu import descs, SMPP_ESME_RINVBNDSTS, PDU
SMPP_CLIENT_STATE_CLOSED = 0
SMPP_CLIENT_STATE_OPEN = 1
SMPP_CLIENT_STATE_BOUND_TX = 2
SMPP_CLIENT_STATE_BOUND_RX = 3
SMPP_CLIENT_STATE_BOUND_TRX = 4
logger = getLogger(__name__)
command_states = {
'bind_transmitter': (SMPP_CLIENT_STATE_OPEN,),
'bind_transmitter_resp': (SMPP_CLIENT_STATE_OPEN,),
'bind_receiver': (SMPP_CLIENT_STATE_OPEN,),
'bind_receiver_resp': (SMPP_CLIENT_STATE_OPEN,),
'bind_transceiver': (SMPP_CLIENT_STATE_OPEN,),
'bind_transceiver_resp': (SMPP_CLIENT_STATE_OPEN,),
'outbind': (SMPP_CLIENT_STATE_OPEN,),
'unbind': (SMPP_CLIENT_STATE_BOUND_TX,
SMPP_CLIENT_STATE_BOUND_RX,
SMPP_CLIENT_STATE_BOUND_TRX,),
'unbind_resp': (SMPP_CLIENT_STATE_BOUND_TX,
SMPP_CLIENT_STATE_BOUND_RX,
SMPP_CLIENT_STATE_BOUND_TRX,),
'submit_sm': (SMPP_CLIENT_STATE_BOUND_TX,
SMPP_CLIENT_STATE_BOUND_TRX,),
'submit_sm_resp': (SMPP_CLIENT_STATE_BOUND_TX,
SMPP_CLIENT_STATE_BOUND_TRX,),
'submit_sm_multi': (SMPP_CLIENT_STATE_BOUND_TX,
SMPP_CLIENT_STATE_BOUND_TRX,),
'submit_sm_multi_resp': (SMPP_CLIENT_STATE_BOUND_TX,
SMPP_CLIENT_STATE_BOUND_TRX,),
'data_sm': (SMPP_CLIENT_STATE_BOUND_TX,
SMPP_CLIENT_STATE_BOUND_RX,
SMPP_CLIENT_STATE_BOUND_TRX,),
'data_sm_resp': (SMPP_CLIENT_STATE_BOUND_TX,
SMPP_CLIENT_STATE_BOUND_RX,
SMPP_CLIENT_STATE_BOUND_TRX,),
'deliver_sm': (SMPP_CLIENT_STATE_BOUND_RX,
SMPP_CLIENT_STATE_BOUND_TRX,),
'deliver_sm_resp': (SMPP_CLIENT_STATE_BOUND_RX,
SMPP_CLIENT_STATE_BOUND_TRX,),
'query_sm': (SMPP_CLIENT_STATE_BOUND_RX,
SMPP_CLIENT_STATE_BOUND_TRX,),
'query_sm_resp': (SMPP_CLIENT_STATE_BOUND_RX,
SMPP_CLIENT_STATE_BOUND_TRX,),
'cancel_sm': (SMPP_CLIENT_STATE_BOUND_RX,
SMPP_CLIENT_STATE_BOUND_TRX,),
'cancel_sm_resp': (SMPP_CLIENT_STATE_BOUND_RX,
SMPP_CLIENT_STATE_BOUND_TRX,),
'replace_sm': (SMPP_CLIENT_STATE_BOUND_TX,),
'replace_sm_resp': (SMPP_CLIENT_STATE_BOUND_TX,),
'enquire_link': (SMPP_CLIENT_STATE_BOUND_TX,
SMPP_CLIENT_STATE_BOUND_RX,
SMPP_CLIENT_STATE_BOUND_TRX,),
'enquire_link_resp': (SMPP_CLIENT_STATE_BOUND_TX,
SMPP_CLIENT_STATE_BOUND_RX,
SMPP_CLIENT_STATE_BOUND_TRX,),
'alert_notification': (SMPP_CLIENT_STATE_BOUND_RX,
SMPP_CLIENT_STATE_BOUND_TRX,),
'generic_nack': (SMPP_CLIENT_STATE_BOUND_TX,
SMPP_CLIENT_STATE_BOUND_RX,
SMPP_CLIENT_STATE_BOUND_TRX,)
}
state_setters = {
'bind_transmitter_resp': SMPP_CLIENT_STATE_BOUND_TX,
'bind_receiver_resp': SMPP_CLIENT_STATE_BOUND_RX,
'bind_transceiver_resp': SMPP_CLIENT_STATE_BOUND_TRX,
'unbind_resp': SMPP_CLIENT_STATE_OPEN
}
#
# Global response number
#
responses = 0
def log(*msg):
    """Log message"""
    logger.debug(' '.join(map(str, msg)))
class Client:
"""SMPP client class"""
state = SMPP_CLIENT_STATE_CLOSED
host = None
port = None
vendor = None
_socket = None
_stack = [] # PDU stack
_error_stack = None
def __init__(self, host, port):
"""Initialize"""
self.host = host
self.port = int(port)
self._socket = socket(AF_INET, SOCK_STREAM)
self._socket.settimeout(5)
self._error_stack = []
self.receiver_mode = False
def connect(self):
"""Connect to SMSC"""
try:
self._socket.connect((self.host, self.port))
self.state = SMPP_CLIENT_STATE_OPEN
except socket_error:
raise ConnectionError('Connection refused')
def disconnect(self):
"""Disconnect from the SMSC"""
logger.debug('Disconnecting...')
self._socket.close()
self.state = SMPP_CLIENT_STATE_CLOSED
def _bind(self, command_name, **args):
"""Send bind_transmitter command to the SMSC"""
if command_name in ['bind_receiver', 'bind_transceiver']:
logger.debug('I am receiver')
self.receiver_mode = True
p = make_pdu(command_name, **(args))
self.send_pdu(p)
return self.read_pdu()
def bind_transmitter(self, **args):
"""Bind as a transmitter"""
return self._bind('bind_transmitter', **(args))
def bind_receiver(self, **args):
"""Bind as a receiver"""
return self._bind('bind_receiver', **(args))
def bind_transceiver(self, **args):
"""Bind as a transmitter and receiver at once"""
return self._bind('bind_transceiver', **(args))
def unbind(self):
"""Unbind from the SMSC"""
p = make_pdu('unbind')
self.send_pdu(p)
return self.read_pdu()
def send_pdu(self, p):
"""Send PDU to the SMSC"""
if self.state not in command_states[p.command]:
raise Exception('Command {} failed: {}'.format(p.command, descs[SMPP_ESME_RINVBNDSTS]))
self._push_pdu(p)
generated = p.generate()
logger.debug(' '.join(
[str(x) for x in ['>>', b2a_hex(generated), len(generated), 'bytes']]))
self._socket.send(generated)
return True
def read_pdu(self):
"""Read PDU from the SMSC"""
logger.debug('Waiting for PDU...')
raw_len = self._socket.recv(4)
        if not raw_len:
            return False
try:
length = unpack('>L', raw_len)[0]
except struct_error:
            logger.debug('Received broken pdu...')
return False
raw_pdu = self._socket.recv(length - 4)
raw_pdu = raw_len + raw_pdu
logger.debug(' '.join(
[str(x) for x in ['<<', b2a_hex(raw_pdu), len(raw_pdu), 'bytes']]))
PDU.extract_command(raw_pdu)
p = parse_pdu(raw_pdu)
self._push_pdu(p)
if p.is_error():
raise Exception('({}) {}: {}'.format(p.status, p.command, descs[p.status]))
elif p.command in list(state_setters.keys()):
self.state = state_setters[p.command]
return p
def accept(self, object):
"""Accept an object"""
raise NotImplementedError('not implemented')
def _message_received(self, p):
"""Handler for received message event"""
dsmr = make_pdu('deliver_sm_resp')
dsmr.sequence = p.sequence
self.send_pdu(dsmr)
return self.message_received_handler(pdu=p)
def _enquire_link_received(self):
ler = make_pdu('enquire_link_resp')
self.send_pdu(ler)
        logger.debug('Link enquiry...')
def set_message_received_handler(self, func):
"""Set new function to handle message receive event"""
self.message_received_handler = func
@staticmethod
def message_received_handler(**args):
"""Custom handler to process received message. May be overridden"""
        logger.debug('Message received handler (should be overridden)')
def listen(self):
"""Listen for PDUs and act"""
if not self.receiver_mode:
            raise Exception('Client.listen() is not allowed to be invoked manually for a non-receiver connection')
while True:
try:
p = self.read_pdu()
except timeout:
logger.debug('Socket timeout, listening again')
continue
if p.command == 'unbind':
break
elif p.command == 'deliver_sm':
keep_listening = self._message_received(p)
if not keep_listening:
break
elif p.command == 'enquire_link':
self._enquire_link_received()
def send_message(self, **args):
"""Send message
Required Arguments:
source_addr_ton -- Source address TON
source_addr -- Source address (string)
dest_addr_ton -- Destination address TON
destination_addr -- Destination address (string)
short_message -- Message text (string)"""
ssm = make_pdu('submit_sm', **(args))
self.send_pdu(ssm)
resp = self.read_pdu()
return resp
def _push_pdu(self, p):
"""Push PDU into a stack"""
if p.is_request():
k = 'request'
else:
k = 'response'
self._stack.append({p.sequence: {k: p}})
class ConnectionError(Exception):
"""Connection error"""
#
# Main block for testing
#
if __name__ == '__main__':
from sys import path
path.insert(0, '..')
from smpplib import client as smpplib_client
def recv_handler(**args):
pass
client = smpplib_client.Client('localhost', 2775)
client.connect()
client.set_message_received_handler(recv_handler)
try:
client.bind_transceiver(system_id='smppclient1', password='pwd1', system_type='www')
client.listen()
finally:
client.unbind()
client.disconnect()
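    # A minimal transmitter-side sketch for sending a message (illustrative
    # only; the credentials and addresses below are placeholders, not values
    # defined by this module):
    #
    #     tx = smpplib_client.Client('localhost', 2775)
    #     tx.connect()
    #     try:
    #         tx.bind_transmitter(system_id='smppclient2', password='pwd2', system_type='www')
    #         tx.send_message(source_addr_ton=1, source_addr='12345',
    #                         dest_addr_ton=1, destination_addr='67890',
    #                         short_message='hello')
    #     finally:
    #         tx.unbind()
    #         tx.disconnect()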
|
Frodox/buildbot
|
refs/heads/master
|
master/buildbot/util/service.py
|
1
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
from future.utils import itervalues
import hashlib
from twisted.application import service
from twisted.internet import defer
from twisted.internet import task
from twisted.python import failure
from twisted.python import log
from twisted.python import reflect
from twisted.python.reflect import accumulateClassList
from buildbot import util
from buildbot.util import ascii2unicode
from buildbot.util import config
from buildbot.util import unicode2bytes
class ReconfigurableServiceMixin(object):
reconfig_priority = 128
@defer.inlineCallbacks
def reconfigServiceWithBuildbotConfig(self, new_config):
if not service.IServiceCollection.providedBy(self):
return
# get a list of child services to reconfigure
reconfigurable_services = [svc
for svc in self
if isinstance(svc, ReconfigurableServiceMixin)]
# sort by priority
reconfigurable_services.sort(key=lambda svc: -svc.reconfig_priority)
for svc in reconfigurable_services:
yield svc.reconfigServiceWithBuildbotConfig(new_config)
# twisted 16's Service is now a new-style class; better to make everything new-style
# to catch issues even on twisted < 16
class AsyncService(service.Service, object):
@defer.inlineCallbacks
def setServiceParent(self, parent):
if self.parent is not None:
yield self.disownServiceParent()
parent = service.IServiceCollection(parent, parent)
self.parent = parent
yield self.parent.addService(self)
# We recurse over the parent services until we find a MasterService
@property
def master(self):
if self.parent is None:
return None
return self.parent.master
class AsyncMultiService(AsyncService, service.MultiService):
def startService(self):
service.Service.startService(self)
dl = []
# if a service attaches another service during the reconfiguration
# then the service will be started twice, so we don't use iter, but rather
# copy in a list
for svc in list(self):
# handle any deferreds, passing up errors and success
dl.append(defer.maybeDeferred(svc.startService))
return defer.gatherResults(dl, consumeErrors=True)
def stopService(self):
service.Service.stopService(self)
dl = []
services = list(self)
services.reverse()
for svc in services:
dl.append(defer.maybeDeferred(svc.stopService))
# unlike MultiService, consume errors in each individual deferred, and
# pass the first error in a child service up to our caller
return defer.gatherResults(dl, consumeErrors=True)
def addService(self, service):
if service.name is not None:
if service.name in self.namedServices:
raise RuntimeError("cannot have two services with same name"
" '%s'" % service.name)
self.namedServices[service.name] = service
self.services.append(service)
if self.running:
# It may be too late for that, but we will do our best
service.privilegedStartService()
return service.startService()
return defer.succeed(None)
class MasterService(AsyncMultiService):
# master service is the service that stops the master property recursion
@property
def master(self):
return self
class SharedService(AsyncMultiService):
"""a service that is created only once per parameter set in a parent service"""
@classmethod
def getService(cls, parent, *args, **kwargs):
name = cls.getName(*args, **kwargs)
if name in parent.namedServices:
return defer.succeed(parent.namedServices[name])
try:
instance = cls(*args, **kwargs)
except Exception:
# we transform all exceptions into failure
return defer.fail(failure.Failure())
        # The class is not required to initialize its name,
        # but we use the name to identify the instance in the parent service,
        # so we force it to the name we used
instance.name = name
d = instance.setServiceParent(parent)
@d.addCallback
def returnInstance(res):
            # we put the service at the top of the list, so that it is stopped last.
            # This makes sense as the shared service is used as a dependency
            # by other services
parent.services.remove(instance)
parent.services.insert(0, instance)
# hook the return value to the instance object
return instance
return d
@classmethod
def getName(cls, *args, **kwargs):
_hash = hashlib.sha1()
for arg in args:
arg = unicode2bytes(str(arg))
_hash.update(arg)
for k, v in sorted(kwargs.items()):
k = unicode2bytes(str(k))
v = unicode2bytes(str(v))
_hash.update(k)
_hash.update(v)
return cls.__name__ + "_" + _hash.hexdigest()
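# A minimal usage sketch for SharedService (ExampleSharedClient and its
# base_url argument are hypothetical names used only for illustration):
#
#     class ExampleSharedClient(SharedService):
#         def __init__(self, base_url):
#             super(ExampleSharedClient, self).__init__()
#             self.base_url = base_url
#
#     # later, inside an inlineCallbacks method of some parent AsyncMultiService:
#     svc = yield ExampleSharedClient.getService(parent, 'https://example.com')
#
# A second call with the same arguments returns the same instance, because
# getName() hashes the constructor arguments into the child service's name.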
class BuildbotService(AsyncMultiService, config.ConfiguredMixin, util.ComparableMixin,
ReconfigurableServiceMixin):
compare_attrs = ('name', '_config_args', '_config_kwargs')
name = None
configured = False
objectid = None
def __init__(self, *args, **kwargs):
name = kwargs.pop("name", None)
if name is not None:
self.name = ascii2unicode(name)
self.checkConfig(*args, **kwargs)
if self.name is None:
raise ValueError(
"%s: must pass a name to constructor" % type(self))
self._config_args = args
self._config_kwargs = kwargs
self.rendered = False
AsyncMultiService.__init__(self)
def getConfigDict(self):
_type = type(self)
return {'name': self.name,
'class': _type.__module__ + "." + _type.__name__,
'args': self._config_args,
'kwargs': self._config_kwargs}
@defer.inlineCallbacks
def reconfigServiceWithSibling(self, sibling):
# only reconfigure if sibling is configured differently.
# sibling == self is using ComparableMixin's implementation
# only compare compare_attrs
if self.configured and sibling == self:
defer.returnValue(None)
self.configured = True
# render renderables in parallel
# Properties import to resolve cyclic import issue
from buildbot.process.properties import Properties
p = Properties()
p.master = self.master
# render renderables in parallel
secrets = []
kwargs = {}
accumulateClassList(self.__class__, 'secrets', secrets)
for k, v in sibling._config_kwargs.items():
if k in secrets:
value = yield p.render(v)
setattr(self, k, value)
kwargs.update({k: value})
else:
kwargs.update({k: v})
d = yield self.reconfigService(*sibling._config_args,
**sibling._config_kwargs)
defer.returnValue(d)
def configureService(self):
# reconfigServiceWithSibling with self, means first configuration
return self.reconfigServiceWithSibling(self)
@defer.inlineCallbacks
def startService(self):
if not self.configured:
try:
yield self.configureService()
except NotImplementedError:
pass
yield AsyncMultiService.startService(self)
def checkConfig(self, *args, **kwargs):
return defer.succeed(True)
def reconfigService(self, name=None, *args, **kwargs):
return defer.succeed(None)
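# A sketch of how a BuildbotService subclass typically splits configuration
# handling (the class name and the `interval` argument are hypothetical, shown
# only to illustrate the checkConfig/reconfigService split):
#
#     class ExamplePoller(BuildbotService):
#         def checkConfig(self, interval=60, **kwargs):
#             # validate arguments; runs before the service is (re)configured
#             if interval <= 0:
#                 raise ValueError("interval must be positive")
#         def reconfigService(self, interval=60, **kwargs):
#             # apply arguments; called on first start and on each reconfig
#             self.interval = interval
#             return defer.succeed(None)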
class ClusteredBuildbotService(BuildbotService):
"""
ClusteredBuildbotService-es are meant to be executed on a single
master only. When starting such a service, by means of "yield startService",
it will first try to claim it on the current master and:
- return without actually starting it
if it was already claimed by another master (self.active == False).
It will however keep trying to claim it, in case another master
stops, and takes the job back.
- return after it starts else.
"""
compare_attrs = ('name',)
POLL_INTERVAL_SEC = 5 * 60 # 5 minutes
serviceid = None
active = False
def __init__(self, *args, **kwargs):
self.serviceid = None
self.active = False
self._activityPollCall = None
self._activityPollDeferred = None
super(ClusteredBuildbotService, self).__init__(*args, **kwargs)
# activity handling
def isActive(self):
return self.active
def activate(self):
# will run when this instance becomes THE CHOSEN ONE for the cluster
return defer.succeed(None)
def deactivate(self):
# to be overridden by subclasses
# will run when this instance loses its chosen status
return defer.succeed(None)
# service arbitration hooks
def _getServiceId(self):
# retrieve the id for this service; we assume that, once we have a valid id,
# the id doesn't change. This may return a Deferred.
raise NotImplementedError
def _claimService(self):
# Attempt to claim the service for this master. Should return True or False
# (optionally via a Deferred) to indicate whether this master now owns the
# service.
raise NotImplementedError
def _unclaimService(self):
# Release the service from this master. This will only be called by a claimed
# service, and this really should be robust and release the claim. May return
# a Deferred.
raise NotImplementedError
# default implementation to delegate to the above methods
@defer.inlineCallbacks
def startService(self):
# subclasses should override startService only to perform actions that should
# run on all instances, even if they never get activated on this
# master.
yield super(ClusteredBuildbotService, self).startService()
self._startServiceDeferred = defer.Deferred()
self._startActivityPolling()
yield self._startServiceDeferred
def stopService(self):
# subclasses should override stopService only to perform actions that should
# run on all instances, even if they never get activated on this
# master.
self._stopActivityPolling()
# need to wait for prior activations to finish
if self._activityPollDeferred:
d = self._activityPollDeferred
else:
d = defer.succeed(None)
@d.addCallback
def deactivate_if_needed(_):
if self.active:
self.active = False
d = defer.maybeDeferred(self.deactivate)
# no errback here: skip the "unclaim" if the deactivation is
# uncertain
d.addCallback(
lambda _: defer.maybeDeferred(self._unclaimService))
d.addErrback(
log.err, _why="Caught exception while deactivating ClusteredService(%s)" % self.name)
return d
d.addCallback(
lambda _: super(ClusteredBuildbotService, self).stopService())
return d
def _startActivityPolling(self):
self._activityPollCall = task.LoopingCall(self._activityPoll)
# plug in a clock if we have one, for tests
if hasattr(self, 'clock'):
self._activityPollCall.clock = self.clock
d = self._activityPollCall.start(self.POLL_INTERVAL_SEC, now=True)
self._activityPollDeferred = d
# this should never happen, but just in case:
d.addErrback(log.err, 'while polling for service activity:')
def _stopActivityPolling(self):
if self._activityPollCall:
self._activityPollCall.stop()
self._activityPollCall = None
return self._activityPollDeferred
def _callbackStartServiceDeferred(self):
if self._startServiceDeferred is not None:
self._startServiceDeferred.callback(None)
self._startServiceDeferred = None
@defer.inlineCallbacks
def _activityPoll(self):
try:
# just in case..
if self.active:
return
if self.serviceid is None:
self.serviceid = yield self._getServiceId()
try:
claimed = yield self._claimService()
except Exception:
log.err(
_why='WARNING: ClusteredService(%s) got exception while trying to claim' % self.name)
return
if not claimed:
# this master is not responsible
# for this service, we callback for StartService
# if it was not callback-ed already,
# and keep polling to take back the service
# if another one lost it
self._callbackStartServiceDeferred()
return
try:
# this master is responsible for this service
# we activate it
self.active = True
yield self.activate()
except Exception:
# this service is half-active, and noted as such in the db..
log.err(
_why='WARNING: ClusteredService(%s) is only partially active' % self.name)
finally:
# cannot wait for its deactivation
# with yield self._stopActivityPolling
# as we're currently executing the
# _activityPollCall callback
# we just call it without waiting its stop
# (that may open race conditions)
self._stopActivityPolling()
self._callbackStartServiceDeferred()
except Exception:
# don't pass exceptions into LoopingCall, which can cause it to
# fail
log.err(
_why='WARNING: ClusteredService(%s) failed during activity poll' % self.name)
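# A sketch of the three arbitration hooks a ClusteredBuildbotService subclass
# is expected to supply (purely illustrative; a real implementation would back
# these with shared state such as the master database):
#
#     class ExampleClusteredService(ClusteredBuildbotService):
#         def _getServiceId(self):
#             # may return a Deferred; the id must be stable across polls
#             return defer.succeed(1)
#         def _claimService(self):
#             # return True (possibly via a Deferred) only if this master
#             # now owns the service
#             return defer.succeed(True)
#         def _unclaimService(self):
#             # release the claim so another master can take over
#             return defer.succeed(None)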
class BuildbotServiceManager(AsyncMultiService, config.ConfiguredMixin,
ReconfigurableServiceMixin):
config_attr = "services"
name = "services"
def getConfigDict(self):
return {'name': self.name,
'childs': [v.getConfigDict()
for v in itervalues(self.namedServices)]}
@defer.inlineCallbacks
def reconfigServiceWithBuildbotConfig(self, new_config):
        # arrange children by name
old_by_name = self.namedServices
old_set = set(old_by_name)
new_config_attr = getattr(new_config, self.config_attr)
if isinstance(new_config_attr, list):
new_by_name = dict([(s.name, s)
for s in new_config_attr])
elif isinstance(new_config_attr, dict):
new_by_name = new_config_attr
else:
raise TypeError(
"config.%s should be a list or dictionary" % (self.config_attr))
new_set = set(new_by_name)
        # calculate new children, by name, and removed children
removed_names, added_names = util.diffSets(old_set, new_set)
        # find any children for which the fully qualified class name has
        # changed, and treat those as an add and remove.
        # While we're at it, find any services that don't know how to reconfigure
        # and, if they have changed, add them to both removed and added, so that we
        # run the new version
for n in old_set & new_set:
old = old_by_name[n]
new = new_by_name[n]
# detect changed class name
if reflect.qual(old.__class__) != reflect.qual(new.__class__):
removed_names.add(n)
added_names.add(n)
# compare using ComparableMixin if they don't support reconfig
elif not hasattr(old, 'reconfigServiceWithBuildbotConfig'):
if old != new:
removed_names.add(n)
added_names.add(n)
if removed_names or added_names:
log.msg("adding %d new %s, removing %d" %
(len(added_names), self.config_attr, len(removed_names)))
for n in removed_names:
child = old_by_name[n]
            # disownServiceParent calls stopService after removing the relationship;
            # as the child might use self.master.data to stop itself, it's better to stop it first
            # (this is related to the fact that self.master is found by recursively looking at self.parent
            # for a master)
yield child.stopService()
            # stopService has already been called, so do not call it again
child.stopService = lambda: None
yield child.disownServiceParent()
            # HACK: we still keep a reference to the master for some cleanup tasks which are not
            # awaited by stopService (like the complex worker disconnection mechanism)
# http://trac.buildbot.net/ticket/3583
child.parent = self.master
for n in added_names:
child = new_by_name[n]
# setup service's objectid
if hasattr(child, 'objectid'):
class_name = '%s.%s' % (child.__class__.__module__,
child.__class__.__name__)
objectid = yield self.master.db.state.getObjectId(
child.name, class_name)
child.objectid = objectid
yield defer.maybeDeferred(child.setServiceParent, self)
# As the services that were just added got
# reconfigServiceWithSibling called by
# setServiceParent->startService,
# we avoid calling it again by selecting
# in reconfigurable_services, services
# that were not added just now
reconfigurable_services = [svc for svc in self
if svc.name not in added_names]
# sort by priority
reconfigurable_services.sort(key=lambda svc: -svc.reconfig_priority)
for svc in reconfigurable_services:
if not svc.name:
raise ValueError(
"%r: child %r should have a defined name attribute", self, svc)
config_sibling = new_by_name.get(svc.name)
try:
yield svc.reconfigServiceWithSibling(config_sibling)
except NotImplementedError:
                # legacy support. It's too painful to transition old code to the new Service life cycle,
                # so we swap in the new child when the service raises NotImplementedError.
                # Note this means that self will stop and the sibling will take ownership,
                # so there is a short window where the service is unavailable.
yield svc.disownServiceParent()
config_sibling.objectid = svc.objectid
yield config_sibling.setServiceParent(self)
|
napsternxg/gensim
|
refs/heads/develop
|
gensim/sklearn_api/phrases.py
|
2
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""Scikit learn interface for `gensim.models.phrases.Phrases`.
Follows scikit-learn API conventions to facilitate using gensim along with scikit-learn.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.sklearn_api.phrases import PhrasesTransformer
>>>
>>> # Create the model. Make sure no term is ignored and combinations seen 3+ times are captured.
>>> m = PhrasesTransformer(min_count=1, threshold=3)
>>> texts = [
... ['I', 'love', 'computer', 'science'],
... ['computer', 'science', 'is', 'my', 'passion'],
... ['I', 'studied', 'computer', 'science']
... ]
>>>
>>> # Use sklearn fit_transform to see the transformation.
>>> # Since computer and science were seen together 3+ times they are considered a phrase.
>>> assert ['I', 'love', 'computer_science'] == m.fit_transform(texts)[0]
"""
from six import string_types
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.exceptions import NotFittedError
from gensim import models
from gensim.models.phrases import Phraser
class PhrasesTransformer(TransformerMixin, BaseEstimator):
"""Base Phrases module, wraps :class:`~gensim.models.phrases.Phrases`.
    For more information, please have a look at `Mikolov, et. al: "Distributed Representations
of Words and Phrases and their Compositionality" <https://arxiv.org/abs/1310.4546>`_ and
`Gerlof Bouma: "Normalized (Pointwise) Mutual Information in Collocation Extraction"
<https://svn.spraakdata.gu.se/repos/gerlof/pub/www/Docs/npmi-pfd.pdf>`_.
"""
def __init__(self, min_count=5, threshold=10.0, max_vocab_size=40000000,
delimiter=b'_', progress_per=10000, scoring='default', common_terms=frozenset()):
"""
Parameters
----------
min_count : int, optional
Terms with a count lower than this will be ignored
threshold : float, optional
Only phrases scoring above this will be accepted, see `scoring` below.
max_vocab_size : int, optional
Maximum size of the vocabulary. Used to control pruning of less common words, to keep memory under control.
The default of 40M needs about 3.6GB of RAM.
delimiter : str, optional
Character used to join collocation tokens, should be a byte string (e.g. b'_').
progress_per : int, optional
            Training will report progress to the logger after every `progress_per` phrases learned.
scoring : str or function, optional
Specifies how potential phrases are scored for comparison to the `threshold`
setting. `scoring` can be set with either a string that refers to a built-in scoring function,
or with a function with the expected parameter names. Two built-in scoring functions are available
by setting `scoring` to a string:
* 'default': `Mikolov, et. al: "Distributed Representations of Words and Phrases
and their Compositionality" <https://arxiv.org/abs/1310.4546>`_.
* 'npmi': Explained in `Gerlof Bouma: "Normalized (Pointwise) Mutual Information in Collocation
Extraction" <https://svn.spraakdata.gu.se/repos/gerlof/pub/www/Docs/npmi-pfd.pdf>`_.
'npmi' is more robust when dealing with common words that form part of common bigrams, and
ranges from -1 to 1, but is slower to calculate than the default.
To use a custom scoring function, create a function with the following parameters and set the `scoring`
parameter to the custom function, see :func:`~gensim.models.phrases.original_scorer` as example.
            You must define all of the following parameters (though the function need not use all of them):
* worda_count: number of occurrences in `sentences` of the first token in the phrase being scored
* wordb_count: number of occurrences in `sentences` of the second token in the phrase being scored
* bigram_count: number of occurrences in `sentences` of the phrase being scored
* len_vocab: the number of unique tokens in `sentences`
* min_count: the `min_count` setting of the Phrases class
* corpus_word_count: the total number of (non-unique) tokens in `sentences`
            A scoring function missing any of these parameters (even if it does not use them) will
            raise a ValueError on initialization of the Phrases class. The scoring function must be pickleable.
common_terms : set of str, optional
List of "stop words" that won't affect frequency count of expressions containing them.
Allow to detect expressions like "bank_of_america" or "eye_of_the_beholder".
"""
self.gensim_model = None
self.phraser = None
self.min_count = min_count
self.threshold = threshold
self.max_vocab_size = max_vocab_size
self.delimiter = delimiter
self.progress_per = progress_per
self.scoring = scoring
self.common_terms = common_terms
def __setstate__(self, state):
self.__dict__ = state
self.common_terms = frozenset()
self.phraser = None
def fit(self, X, y=None):
"""Fit the model according to the given training data.
Parameters
----------
X : iterable of list of str
Sequence of sentences to be used for training the model.
Returns
-------
:class:`~gensim.sklearn_api.phrases.PhrasesTransformer`
The trained model.
"""
self.gensim_model = models.Phrases(
sentences=X, min_count=self.min_count, threshold=self.threshold,
max_vocab_size=self.max_vocab_size, delimiter=self.delimiter,
progress_per=self.progress_per, scoring=self.scoring, common_terms=self.common_terms
)
self.phraser = Phraser(self.gensim_model)
return self
def transform(self, docs):
"""Transform the input documents into phrase tokens.
Words in the sentence will be joined by `self.delimiter`.
Parameters
----------
docs : {iterable of list of str, list of str}
            Sequence of documents to be transformed.
Returns
-------
iterable of str
Phrase representation for each of the input sentences.
"""
if self.gensim_model is None:
raise NotFittedError(
"This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method."
)
if self.phraser is None:
self.phraser = Phraser(self.gensim_model)
# input as python lists
if isinstance(docs[0], string_types):
docs = [docs]
return [self.phraser[doc] for doc in docs]
def partial_fit(self, X):
"""Train model over a potentially incomplete set of sentences.
This method can be used in two ways:
1. On an unfitted model in which case the model is initialized and trained on `X`.
2. On an already fitted model in which case the X sentences are **added** to the vocabulary.
Parameters
----------
X : iterable of list of str
Sequence of sentences to be used for training the model.
Returns
-------
:class:`~gensim.sklearn_api.phrases.PhrasesTransformer`
The trained model.
"""
if self.gensim_model is None:
self.gensim_model = models.Phrases(
sentences=X, min_count=self.min_count, threshold=self.threshold,
max_vocab_size=self.max_vocab_size, delimiter=self.delimiter,
progress_per=self.progress_per, scoring=self.scoring, common_terms=self.common_terms
)
self.gensim_model.add_vocab(X)
self.phraser = Phraser(self.gensim_model)
return self
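# A sketch of plugging in a custom scoring function (raw_count_scorer below is
# a toy example written for illustration, not one of gensim's built-in scorers;
# note it accepts all six required parameters even though it uses only one):
#
#     def raw_count_scorer(worda_count, wordb_count, bigram_count,
#                          len_vocab, min_count, corpus_word_count):
#         return bigram_count
#
#     m = PhrasesTransformer(min_count=1, threshold=2, scoring=raw_count_scorer)
#     m.fit(texts)  # `texts` as in the module docstring above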
|
yichaoS/cbioportal
|
refs/heads/master
|
docs/conf.py
|
16
|
# -*- coding: utf-8 -*-
#
# cBioPortal documentation build configuration file, created by
# sphinx-quickstart on Thu Jul 7 16:41:35 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import recommonmark
from recommonmark.parser import CommonMarkParser
from recommonmark.transform import AutoStructify
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.md'
source_parsers = {
'.md': CommonMarkParser,
}
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'README'
# General information about the project.
project = u'cBioPortal'
copyright = u'2016, cBioPortal'
author = u'cBioPortal Team'
github_doc_root = 'https://github.com/cBioPortal/cBioPortal/tree/master/docs/'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.2.2'
# The full version, including alpha/beta/rc tags.
release = u'1.2.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'logo_only': True}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = [sphinx_bootstrap_theme.get_html_theme_path()]
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#html_title = u'cBioPortal v1.2.2'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '../portal/src/main/webapp/images/cbioportal_logo.png'
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'cBioPortaldoc'
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# Use RTD theme
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_style = 'css/custom.css'
else:
html_context = {
'css_files': [
'https://media.readthedocs.org/css/sphinx_rtd_theme.css',
'https://media.readthedocs.org/css/readthedocs-doc-embed.css',
'_static/css/custom.css',
],
}
def setup(app):
app.add_config_value('recommonmark_config', {
'url_resolver': lambda url: github_doc_root + url,
'enable_auto_toc_tree': False,
}, True)
app.add_transform(AutoStructify)
|
Saren-Arterius/BukkitPluginsUpdater
|
refs/heads/master
|
bukkit_plugin.py
|
1
|
#!/usr/bin/python3.3
from database import *
from sys import argv
from zipfile import ZipFile, is_zipfile
from re import findall, sub, split
from pyquery import PyQuery as pq
from error import Error
from hashlib import md5
from os.path import dirname, realpath
import http.cookiejar
import webbrowser
import urllib.request
import urllib.parse
class bukkitPlugin(ZipFile):
def __init__(self, path):
zip = ZipFile(path)
with zip.open("plugin.yml") as pluginInfo:
yaml = pluginInfo.read().decode()
self.path = path
self.origin = dirname(path)
self.name = sub("\r|\n", "", findall("name: (.*)", yaml)[0])
self.version = sub("\r|\n", "", findall("version: (.*)", yaml)[0])
self.packageName = sub("\r|\n", "", findall("main: (.*)", yaml)[0])
self.hash = md5(bytes("{0}{1}".format(self.origin, yaml), "utf-8")).hexdigest()
self.fileHash = self.__getMD5()
self.database = database()
def __str__(self):
return "Craftbukkit plugin: {0} {1}\nFile origin: {2}\nPackage name: {3}\nIdent hash: {4}\nFile hash: {5}\n".format(self.name, self.version, self.origin, self.packageName, self.hash, self.fileHash)
def __getMD5(self):
with open(self.path, "rb") as file:
hash = md5()
while True:
piece = file.read(1024)
if piece:
hash.update(piece)
else:
return hash.hexdigest()
def __getGoogleResult(self):
try:
url = "http://www.google.com.hk/search?q=\"{0}\"+files+site%3Adev.bukkit.org%2Fbukkit-plugins".format(sub(" ", "+", self.name))
cookieFile = dirname(realpath(__file__)) + "\\" + "cookie.txt"
cj = http.cookiejar.LWPCookieJar(cookieFile)
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
resp = opener.open(url)
return resp.read().decode()
except Exception as e: #Not tested
try:
print("Failed to use google!\nProbably you used too much!\n{0}\nAttemping human captcha...".format(str(e)))
errorPage = e.read().decode()
imageUrl = "https://ipv4.google.com/sorry/" + pq(errorPage)("img").attr("src")
id = pq(errorPage)("input[name='id']")
gContinue = url
webbrowser.open(imageUrl)
captcha = input("Please type captcha here: ")
payload = [urllib.parse.quote_plus(url), urllib.parse.quote_plus(id), urllib.parse.quote_plus(captcha), urllib.parse.quote_plus("Submit")]
sorryReqUrl = "https://ipv4.google.com/sorry/CaptchaRedirect?continue={0}&id={1}&captcha={2}&submit={3}".format(payload)
resp = opener.open(sorryReqUrl)
return resp.read().decode()
except Exception as e:
raise Error("Failed to use google! Probably you used too much!\n{0}".format(str(e)))
def __getBukkitDevName(self):
bukkitDevName = self.database.selectRow(self.packageName)
if bukkitDevName:
print("{0}: bukkitDevName found!".format(self.name))
return bukkitDevName
else:
print("{0}: bukkitDevName not found, using Google.".format(self.name))
try:
self.googleResult
print("Using cache.")
except:
self.googleResult = self.__getGoogleResult()
for elem in pq(self.googleResult)(".r").find("a"):
bukkitDevName = findall("dev.bukkit.org/bukkit-plugins/(.+)/", pq(elem).attr("href"))
bukkitDevName = str(split("/", bukkitDevName[0])[0])
if bukkitDevName:
self.database.newRow(self.packageName, bukkitDevName)
print("{0}: bukkitDevName ({1}) for package ({2}) inserted!".format(self.name, bukkitDevName, self.packageName))
return bukkitDevName
raise Error("Failed to get bukkit files page, or plugin is not available on bukkitdev.")
def __getFilesPage(self):
try:
self.bukkitDevName
except:
self.bukkitDevName = self.__getBukkitDevName()
url = "http://dev.bukkit.org/bukkit-plugins/{0}/files/".format(self.bukkitDevName)
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
resp = opener.open(url)
return resp.read().decode()
def getAllVersions(self):
try:
return self.versions
except:
self.filesPage = self.__getFilesPage()
table = []
for index, column in enumerate(pq(self.filesPage)("tbody").find("tr")):
table.append({})
for cell in pq(column).find("td"):
if pq(cell).attr("class") == "col-file":
table[index]["Name"] = {"Name": pq(cell)("a").html(), "href": "http://dev.bukkit.org" + pq(cell)("a").attr("href")}
elif pq(cell).attr("class") == "col-type":
table[index]["Release type"] = pq(cell)("span").html()
elif pq(cell).attr("class") == "col-status":
table[index]["Status"] = pq(cell)("span").html()
elif pq(cell).attr("class") == "col-date":
table[index]["Date"] = int(pq(cell)("span").attr("data-epoch"))
elif pq(cell).attr("class") == "col-game-version":
try:
table[index]["Game version"] = [findall("(\d\.\d\.\d)", pq(li).html())[0] for li in pq(cell).find("li")]
except:
table[index]["Game version"] = [pq(li).html() for li in pq(cell).find("li")]
elif pq(cell).attr("class") == "col-filename":
table[index]["Filename"] = sub(" +|\r|\n", "", pq(cell).html())
elif pq(cell).attr("class") == "col-downloads":
table[index]["Downloads"] = int(pq(cell)("span").attr("data-value"))
self.versions = table
return self.versions
def getVersionUrl(self, index):
try:
downloadPageUrl = self.versions[index]["Name"]["href"]
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
resp = opener.open(downloadPageUrl)
downloadPage = resp.read().decode()
versionHash = findall("([a-f\d]{32})", downloadPage)[0]
if not self.areHashesMatch(versionHash, downloadPageUrl):
self.database.setVersionHash(versionHash, downloadPageUrl)
return pq(downloadPage)(".user-action-download").find("a").attr("href")
except KeyError:
raise Error("Bukkit page is broken?")
    def areHashesMatch(self, hash, href):
        cmpHash = self.database.getVersionHash(href)
        return cmpHash == hash
if __name__ == "__main__": #Test
    plugin = bukkitPlugin(argv[1])
    plugin.getAllVersions()  # populate self.versions before requesting a download URL
    print(plugin.getVersionUrl(0))
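    # A slightly fuller usage sketch (illustrative; it reuses the `plugin`
    # object created above from the jar path given on the command line):
    #
    #     for row in plugin.getAllVersions():
    #         print(row["Name"]["Name"], row["Date"], row["Game version"])
    #     print(plugin.getVersionUrl(0))  # download URL for the first row of the files table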
|
ytaben/cyphesis
|
refs/heads/master
|
rulesets/mason/world/tasks/Baking.py
|
3
|
#This file is distributed under the terms of the GNU General Public license.
#Copyright (C) 2011 Jekin Trivedi <jekintrivedi@gmail.com> (See the file COPYING for details).
from atlas import *
from physics import *
from physics import Quaternion
from physics import Point3D
from physics import Vector3D
import sys
import server
class Baking(server.Task):
"""A task for making various structures using skills with pickaxe"""
materials = ["earth_wall" , "board_wall" ]
def craft_operation(self, op):
""" Op handler for crafting op which activates this task """
if len(op) < 1:
sys.stderr.write("Baking task has no target in crafting op")
# FIXME Use weak references, once we have them
self.target = server.world.get_object_ref(op[0].id)
self.tool = op.to
self.pos = Point3D(op[0].pos)
def tick_operation(self, op):
""" Op handler for regular tick op """
if self.target is None:
# print "Target is no more"
self.irrelevant()
return
self.rate = 0.5 / 0.75
self.progress += 1
res=Oplist()
if self.progress < 1:
# print "Not done yet"
return self.next_tick(0.75)
self.progress = 0
        # Counters for earth_wall, board_wall and the total entity count.
ecount = 0
bcount = 0
count = 0
        # List which stores the entities to be consumed
raw_materials = []
for item in self.character.contains:
if item.type[0] == str(self.materials[0]):
raw_materials.append(item)
ecount = ecount + 1
            elif item.type[0] == str(self.materials[1]):
                raw_materials.append(item)
                bcount = bcount + 1
            else:
                print item, "Not sufficient material in inventory"
count = ecount + bcount
chunk_loc = self.target().location.copy()
chunk_loc.coordinates = self.target().location.coordinates
chunk_loc.orientation = self.target().location.orientation
# Select which structure to produce depending on the recipe present in inventory
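        # Recipe table implied by the checks below (earth_wall x board_wall -> result):
        #   1 x 1 -> castle_wall_run
        #   2 x 4 -> castle_wall_corner
        #   3 x 2 -> castle_wall_stairs
        #   3 x 3 -> castle_wall_gate
        #   4 x 3 -> castle_house_a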
if ecount == 1 :
if bcount == 1 :
create=Operation("create", Entity(name = "castle_wall_run", type = "castle_wall_run", location = chunk_loc), to = self.target())
res.append(create)
if ecount == 2 :
if bcount == 4 :
create=Operation("create", Entity(name = "castle_wall_corner", type = "castle_wall_corner", location = chunk_loc), to = self.target())
res.append(create)
if ecount == 3 :
if bcount == 2 :
create=Operation("create", Entity(name = "castle_wall_stairs", type = "castle_wall_stairs", location = chunk_loc), to = self.target())
res.append(create)
if bcount == 3 :
create=Operation("create", Entity(name = "castle_wall_gate", type = "castle_wall_gate", location = chunk_loc), to = self.target())
res.append(create)
if ecount == 4 :
if bcount == 3 :
create=Operation("create", Entity(name = "castle_house_a", type = "castle_house_a", location = chunk_loc), to = self.target())
res.append(create)
# Consume the materials according to the recipe
while (count > 0) :
tar = raw_materials.pop()
set = Operation("set", Entity(tar.id, status = -1), to = tar)
res.append(set)
count = count - 1
self.progress = 1
self.irrelevant()
return res
|
shaulkf/bitcoin
|
refs/heads/master
|
qa/rpc-tests/bip65-cltv-p2p.py
|
11
|
#!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block
from test_framework.comptool import TestInstance, TestManager
from test_framework.script import CScript, OP_1NEGATE, OP_NOP2, OP_DROP
from binascii import hexlify, unhexlify
import cStringIO
import time
def cltv_invalidate(tx):
'''Modify the signature in vin 0 of the tx to fail CLTV
Prepends -1 CLTV DROP in the scriptSig itself.
'''
tx.vin[0].scriptSig = CScript([OP_1NEGATE, OP_NOP2, OP_DROP] +
list(CScript(tx.vin[0].scriptSig)))
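# Background note: BIP65 redefines OP_NOP2 as CHECKLOCKTIMEVERIFY, and CLTV
# fails if the top stack item is negative, so prepending OP_1NEGATE guarantees
# rejection once the new rules are enforced, while pre-BIP65 nodes still treat
# OP_NOP2 as a no-op and accept the transaction.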
'''
This test is meant to exercise BIP65 (CHECKLOCKTIMEVERIFY)
Connect to a single node.
Mine 2 (version 3) blocks (save the coinbases for later).
Generate 98 more version 3 blocks, verify the node accepts.
Mine 749 version 4 blocks, verify the node accepts.
Check that the new CLTV rules are not enforced on the 750th version 4 block.
Check that the new CLTV rules are enforced on the 751st version 4 block.
Mine 199 new version blocks.
Mine 1 old-version block.
Mine 1 new version block.
Mine 1 old version block, see that the node rejects.
'''
class BIP65Test(ComparisonTestFramework):
def __init__(self):
self.num_nodes = 1
def setup_network(self):
# Must set the blockversion for this test
self.nodes = start_nodes(1, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1', '-blockversion=3']],
binary=[self.options.testbinary])
def run_test(self):
test = TestManager(self, self.options.tmpdir)
test.add_all_connections(self.nodes)
NetworkThread().start() # Start up network handling in another thread
test.run()
def create_transaction(self, node, coinbase, to_address, amount):
from_txid = node.getblock(coinbase)['tx'][0]
inputs = [{ "txid" : from_txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
tx = CTransaction()
f = cStringIO.StringIO(unhexlify(signresult['hex']))
tx.deserialize(f)
return tx
def get_tests(self):
self.coinbase_blocks = self.nodes[0].generate(2)
height = 3 # height of the next block to build
self.tip = int ("0x" + self.nodes[0].getbestblockhash() + "L", 0)
self.nodeaddress = self.nodes[0].getnewaddress()
self.last_block_time = time.time()
''' 98 more version 3 blocks '''
test_blocks = []
for i in xrange(98):
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 3
block.rehash()
block.solve()
test_blocks.append([block, True])
self.last_block_time += 1
self.tip = block.sha256
height += 1
yield TestInstance(test_blocks, sync_every_block=False)
''' Mine 749 version 4 blocks '''
test_blocks = []
for i in xrange(749):
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 4
block.rehash()
block.solve()
test_blocks.append([block, True])
self.last_block_time += 1
self.tip = block.sha256
height += 1
yield TestInstance(test_blocks, sync_every_block=False)
'''
Check that the new CLTV rules are not enforced in the 750th
        version 4 block.
'''
spendtx = self.create_transaction(self.nodes[0],
self.coinbase_blocks[0], self.nodeaddress, 1.0)
cltv_invalidate(spendtx)
spendtx.rehash()
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 4
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.last_block_time += 1
self.tip = block.sha256
height += 1
yield TestInstance([[block, True]])
'''
Check that the new CLTV rules are enforced in the 751st version 4
block.
'''
spendtx = self.create_transaction(self.nodes[0],
self.coinbase_blocks[1], self.nodeaddress, 1.0)
cltv_invalidate(spendtx)
spendtx.rehash()
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 4
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.last_block_time += 1
yield TestInstance([[block, False]])
''' Mine 199 new version blocks on last valid tip '''
test_blocks = []
for i in xrange(199):
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 4
block.rehash()
block.solve()
test_blocks.append([block, True])
self.last_block_time += 1
self.tip = block.sha256
height += 1
yield TestInstance(test_blocks, sync_every_block=False)
''' Mine 1 old version block '''
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 3
block.rehash()
block.solve()
self.last_block_time += 1
self.tip = block.sha256
height += 1
yield TestInstance([[block, True]])
''' Mine 1 new version block '''
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 4
block.rehash()
block.solve()
self.last_block_time += 1
self.tip = block.sha256
height += 1
yield TestInstance([[block, True]])
''' Mine 1 old version block, should be invalid '''
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 3
block.rehash()
block.solve()
self.last_block_time += 1
yield TestInstance([[block, False]])
if __name__ == '__main__':
BIP65Test().main()
|
mihailignatenko/erp
|
refs/heads/master
|
addons/procurement_jit_stock/__init__.py
|
242
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import procurement_jit_stock
|
codingcave/Chimera-Solve
|
refs/heads/master
|
CreateMakefile.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
# Variable definition
makefile = 'Makefile'
executable = 'chimera'
extend_library = 'module'
extend_library_name = 'lib' + extend_library + '.so'
core_library = 'chimera'
core_library_name = 'lib' + core_library + '.so'
runtime_library = 'runtime'
runtime_library_name = 'lib' + runtime_library + '.so'
lua_path = 'lua-5.3.3'
lua_src_path = os.path.join(lua_path, 'src')
lua_lib = 'liblua.so.5.3.3'
lua_lib_ln = 'liblua.so'
part_main = 'main'
part_runtime = 'runtime'
part_extend = 'extend'
part_core = 'core'
part_module = 'module'
flags = {
part_main: {
'inc': ['-Ilua-5.3.3/src'],
'cflags': ['-Wall', '-fexceptions', '-std=c++11'],
'resinc': [],
'libdir': [],
'lib': [],
'ldflags': ['-ldl', '-lboost_system', '-lboost_filesystem']
},
part_runtime: {
'inc': ['-Ilua-5.3.3/src'],
'cflags': ['-Wall', '-fexceptions', '-fPIC', '-std=c++11'],
'resinc': [],
'libdir': [],
'lib': [],
'ldflags': ['-ldl', '-lboost_system', '-lboost_filesystem', '-shared', '-Wl,-soname']
},
part_extend: {
'inc': ['-Ilua-5.3.3/src'],
'cflags': ['-Wall', '-fexceptions', '-fPIC', '-std=c++11'],
'resinc': [],
'libdir': [],
'lib': [],
'ldflags': ['-lboost_system', '-shared', '-Wl,-soname']
},
part_module: {
'inc': ['-Ilua-5.3.3/src'],
'cflags': ['-Wall', '-fexceptions', '-fPIC', '-std=c++11'],
'resinc': [],
'libdir': [],
'lib': [],
'ldflags': ['-lboost_system', '-shared', '-Wl,-soname']
},
part_core: {
'inc': ['-Ilua-5.3.3/src'],
'cflags': ['-Wall', '-fexceptions', '-fPIC', '-std=c++11'],
'resinc': [],
'libdir': [],
'lib': [],
'ldflags': ['-Lbin/Debug', '-llua', '-lboost_system', '-ldl', '-shared', '-Wl,-soname']
}
}
built_names = {
part_main: '$(EXE_NAME)',
part_runtime: '$(RUN_NAME)',
part_extend: '$(MOD_NAME)',
part_core: '$(CORE_NAME)',
part_module: ''
}
path_names = {
part_runtime: 'runtime',
part_extend: 'extend',
part_main: 'main',
part_core: 'core',
part_module: 'modules'
}
relate_names = {
part_main: 'out',
part_runtime: 'run',
part_extend: 'mod',
part_core: 'core',
part_module: ''
}
inc_names = {
part_runtime: runtime_library,
part_extend: extend_library,
part_core: core_library
}
dependencies = {
part_main: [part_runtime, part_extend, part_core],
part_runtime: [part_extend, part_core],
part_extend: [part_core],
part_core: [],
part_module: [part_extend, part_core]
}
phony = {
'unique': ['clean', 'install', 'uninstall'],
'target': ['before', 'after', 'clean']
}
# Add include paths for all dependencies
for fname in flags:
flags[fname]['inc'].append('-I' + path_names[fname] + '/include')
for d in dependencies[fname]:
flags[fname]['inc'].append('-I' + path_names[d] + '/include')
default_target = 'release'
targets = ['debug', 'release']
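# For orientation: with targets = ['debug', 'release'], createTarget() below
# emits one top-level rule per target, roughly of this shape (a sketch only;
# the exact variable names come from the format strings in the functions that
# follow):
#
#   release: before_release lua_release out_release $(MODULE_OBJ_RELEASE) after_release
#   debug: before_debug lua_debug out_debug $(MODULE_OBJ_DEBUG) after_debug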
def createTarget(f, target, files, modules):
f.write('{0}: before_{0} lua_{0} {3}_{0} $({2}_OBJ_{1}) after_{0}\n\n'.format(target, target.upper(), part_module.upper(), relate_names[part_main]))
beforeTarget(f, target)
luaTarget(f, target)
libTarget(f, target, part_runtime)
libTarget(f, target, part_extend)
libTarget(f, target, part_core)
for m in modules:
moduleTarget(f, target, m, modules[m])
outTarget(f, target)
for ft in files:
for filedict in files[ft]:
srcPath = filedict['src']
objPath = filedict['obj']
cxxFileTarget(f, target, srcPath, objPath, ft)
#if ft == part_module:
# ldFileTarget(f, target, filedict['name'], objPath, ft)
f.write('\n')
afterTarget(f, target)
cleanTarget(f, target)
def beforeTarget(f, target):
f.write('before_' + target + ':\n')
for c in path_names:
f.write(' test -d $(' + c.upper() + '_OUT_' + target.upper() + ') || mkdir -p $(' + c.upper() + '_OUT_' + target.upper() + ')\n')
f.write(' test -d $(' + c.upper() + '_OBJDIR_' + target.upper() + ') || mkdir -p $(' + c.upper() + '_OBJDIR_' + target.upper() + ')\n')
f.write('\n')
def luaTarget(f, target):
f.write('lua_{0}: lua\n'.format(target))
f.write(' cp -u {2} $({1}_OUT_{0})\n'.format(target.upper(), part_main.upper(), os.path.join(lua_src_path, lua_lib)))
f.write(' cd $({1}_OUT_{0}) && ln -sf {2} {3}\n'.format(target.upper(), part_main.upper(), lua_lib, lua_lib_ln))
f.write('\n')
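# libTarget writes the link rule for one shared-library part (runtime, extend or core),
# depending on its library dependencies and its object list.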
def libTarget(f, target, part_name):
str_dep = ''
str_inc = ''
for dep_n in dependencies[part_name]:
str_dep += '{1}_{0} '.format(target, relate_names[dep_n])
#str_inc += '-l' + inc_names[part_name] + ' '
f.write(('{3}_{0}: before_{0} ' + str_dep + ' $({2}_OBJ_{1})\n').format(target, target.upper(), part_name.upper(), relate_names[part_name]))
f.write((' $(LD) $({1}_LIBDIR_{0}) -o $({1}_OUTFILE_{0}) $({1}_OBJ_{0}) ' + str_inc + ' $({1}_LDFLAGS_{0}),{2} $({1}_LIB_{0})\n').format(target.upper(), part_name.upper(), built_names[part_name]))
f.write('\n')
def outTarget(f, target):
str_dep = ''
str_inc = ''
for dep_n in dependencies[part_main]:
str_dep += '{1}_{0} '.format(target, relate_names[dep_n])
str_inc += '-l' + inc_names[dep_n] + ' '
f.write(('{3}_{0}: before_{0} ' + str_dep + ' $({2}_OBJ_{1})\n').format(target, target.upper(), part_main.upper(), relate_names[part_main]))
f.write((' $(LD) $({1}_LIBDIR_{0}) -o $({1}_OUTFILE_{0}) $({1}_OBJ_{0}) ' + str_inc + ' $({1}_LDFLAGS_{0}) $({1}_LIB_{0})\n').format(target.upper(), part_main.upper()))
f.write('\n')
def cxxFileTarget(f, target, src_file, obj_file, part):
f.write('$({1}_OBJDIR_{0})/{3}: {4}/src/{2}\n'.format(target.upper(), part.upper(), src_file, obj_file, path_names[part]))
f.write(' $(CXX) $({1}_CFLAGS_{0}) $({1}_INC_{0}) -c {4}/src/{2} -o $({1}_OBJDIR_{0})/{3}\n'.format(target.upper(), part.upper(), src_file, obj_file, path_names[part]))
def ldFileTarget(f, target, name, obj_file, part):
f.write(' $(LD) $({1}_LIBDIR_{0}) -o $({1}_OUT_{0})/{2}.mod $({1}_OBJDIR_{0})/{3} $({1}_LDFLAGS_{0}),{2}.mod $({1}_LIB_{0})\n'.format(target.upper(), part.upper(), name, obj_file))
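# moduleTarget links one module's object files into <name>.mod inside the target's modules output directory.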
def moduleTarget(f, target, name, files):
str_objs = ''
for fi in files:
str_objs += ('$({1}_OBJDIR_{0})/{2} ').format(target.upper(), part_module.upper(), fi['obj'])
target_name = part_module.lower() + '_' + name.lower() + '_' + target.lower()
f.write(target_name + ': ' + str_objs + '\n')
f.write((' $(LD) $({1}_LIBDIR_{0}) -o $({1}_OUT_{0})/{2}.mod ' + str_objs + ' $({1}_LDFLAGS_{0}),{2}.mod $({1}_LIB_{0})\n\n').format(target.upper(), part_module.upper(), name))
def afterTarget(f, target):
    f.write('after_' + target + ':\n')
f.write(' cp test.lua $({1}_OUT_{0})/test.lua\n'.format(target.upper(), part_runtime.upper()))
f.write(' cp run.sh $({1}_OUT_{0})/run.sh\n'.format(target.upper(), part_runtime.upper()))
f.write('\n')
def cleanTarget(f, target):
f.write('clean_' + target + ':\n')
for c in path_names:
f.write(' rm -rf $({1}_OBJDIR_{0})\n'.format(target.upper(), c.upper()))
f.write(' rm -rf $({1}_OUT_{0})\n'.format(target.upper(), c.upper()))
f.write('\n')
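# Write the Makefile: toolchain variables, per-part flag variables, per-target directories,
# object lists and all build/install/uninstall/clean rules.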
with open(makefile, 'w') as f:
f.write('#------------------------------------------------------------------------------#\n')
f.write('# This makefile was created with python #\n')
f.write('#------------------------------------------------------------------------------#\n')
f.write('\n')
f.write('WORKDIR = `pwd`\n')
f.write('\n')
f.write('CC = gcc\n')
f.write('CXX = g++\n')
f.write('AR = ar\n')
f.write('LD = g++\n')
f.write('WINDRES = windres\n')
f.write('\n')
# set all common definitions
for c in flags:
for item in flags[c]:
f.write(c.upper() + '_' + item.upper() + ' =')
for flag in flags[c][item]:
f.write(' ' + flag)
f.write('\n')
f.write('\n')
# built names
f.write('EXE_NAME = ' + executable + '\n')
f.write('MOD_NAME = ' + extend_library_name + '\n')
f.write('RUN_NAME = ' + runtime_library_name + '\n')
f.write('CORE_NAME = ' + core_library_name + '\n')
f.write('\n')
# Target definitions
for t in targets:
targetName = t[0].upper() + t[1:].lower()
for c in flags:
for item in flags[c]:
f.write(c.upper() + '_' + item.upper() + '_' + t.upper() + ' =')
if t.lower() == 'release' and item.lower() == 'ldflags':
f.write(' -s')
f.write(' $(' + c.upper() + '_' + item.upper() + ')')
if t.lower() == 'debug' and item.lower() == 'cflags':
f.write(' -g')
elif t.lower() == 'release' and item.lower() == 'cflags':
f.write(' -O2')
if item.lower() == 'ldflags' and c == part_main:
f.write(' -Lbin/' + targetName + ' -l' +extend_library + ' -llua')
#if item.lower() == 'ldflags' and c == part_extend:
# f.write(' -Lbin/' + targetName + ' -llua')
f.write('\n')
f.write(c.upper() + '_OBJDIR_' + t.upper() + ' = obj/' + targetName + '/' + c + '\n')
if c != part_module:
f.write(c.upper() + '_OUT_' + t.upper() + ' = bin/' + targetName + '\n')
else:
f.write(c.upper() + '_OUT_' + t.upper() + ' = bin/' + targetName + '/modules\n')
f.write(c.upper() + '_OUTFILE_' + t.upper() + ' = bin/' + targetName + '/' + built_names[c] + '\n')
f.write('\n')
f.write('\n')
# Find all files recursively
src_files = {}
def checkSrcPath(root, path, files):
lpath = root if path is None else os.path.join(root, path)
for o in os.listdir(lpath):
if os.path.isfile(os.path.join(lpath, o)) and o.rfind('.cpp') == len(o) - 4:
on = o.replace('.cpp', '')
files.append({
'name': on,
'path': "" if path is None else path,
'src': o if path is None else os.path.join(path, o),
'obj': on + ".o"
})
if os.path.isdir(os.path.join(lpath, o)):
dpath = o if path is None else os.path.join(path, o)
checkSrcPath(root, dpath, files)
# Find all files for built and libs
for pn in path_names:
src_files[pn] = []
checkSrcPath(os.path.join(path_names[pn], 'src'), None, src_files[pn])
# find structure for all modules
module_names = {}
lpath = os.path.join(path_names[part_module], 'src')
for o in os.listdir(lpath):
if os.path.isfile(os.path.join(lpath, o)) and o.rfind('.cpp') == len(o) - 4:
on = o.replace('.cpp', '')
module_names[on] = [{
'name': on,
'path': lpath,
'src': os.path.join(lpath, o),
'obj': on + ".o"
}]
if os.path.isdir(os.path.join(lpath, o)):
module_names[o] = []
checkSrcPath(lpath, o, module_names[o])
for t in targets:
for c in src_files:
f.write(c.upper() + '_OBJ_' + t.upper() + ' =')
objdir = '$(' + c.upper() + '_OBJDIR_' + t.upper() + ')/'
if c != part_module:
for sf in src_files[c]:
f.write(' ' + objdir + sf['obj'])
f.write('\n')
else:
for mn in module_names:
f.write(' ' + c.lower() + '_' + mn.lower() + '_' + t.lower())
f.write('\n')
f.write('\n')
f.write('\n')
createTarget(f, default_target, src_files, module_names)
for t in targets:
if t == default_target:
continue
createTarget(f, t, src_files, module_names)
f.write('lua:\n')
f.write('\tcd lua-5.3.3 && $(MAKE) linux\n\n')
f.write('clean:')
for t in targets:
f.write(' clean_' + t)
f.write('\n\n')
f.write('install:\n')
f.write('\ttest -d bin/Release || exit 1\n')
f.write('\ttest -d /usr/local/bin || mkdir -p /usr/local/bin\n')
f.write('\tcp bin/Release/$(EXE_NAME) /usr/local/bin/$(EXE_NAME)\n')
f.write('\tchmod +x /usr/local/bin/$(EXE_NAME)\n')
f.write('\ttest -d /usr/local/lib || mkdir -p /usr/local/lib\n')
f.write('\tcp bin/Release/*.so /usr/local/lib/\n')
f.write('\tldconfig\n')
f.write('\tcp -R bin/Release/modules /usr/local/lib/chimera-modules\n')
f.write('\ttest -d /etc/chimera || mkdir -p /etc/chimera\n')
f.write('\techo "LogLevel=Error" > /etc/chimera/solver.ini\n')
f.write('\techo "" >> /etc/chimera/solver.ini\n')
f.write('\techo "[Filesystem]" >> /etc/chimera/solver.ini\n')
f.write('\techo "type=filesystem" >> /etc/chimera/solver.ini\n')
f.write('\techo "path=/usr/local/lib/chimera-modules" >> /etc/chimera/solver.ini\n')
f.write('\n')
f.write('uninstall:\n')
f.write('\trm -rf /usr/local/bin/chimera\n')
f.write('\trm -rf /usr/local/lib/chimera-modules\n')
f.write('\trm -rf /usr/local/lib/libchimera.so\n')
f.write('\trm -rf /usr/local/lib/liblua.so\n')
f.write('\trm -rf /usr/local/lib/libmodule.so\n')
f.write('\trm -rf /usr/local/lib/libruntime.so\n')
f.write('\tldconfig\n')
f.write('\n')
f.write('.PHONY:')
for pt in phony['unique']:
f.write(' ' + pt)
for t in targets:
for pt in phony['target']:
f.write(' ' + pt + '_' + t)
|
afaheem88/rally
|
refs/heads/master
|
tests/unit/cli/commands/test_verify.py
|
2
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime as date
import os.path
import tempfile
import mock
import six
from rally.cli.commands import verify
from rally.common import objects
from rally import consts
from rally import exceptions
from tests.unit import test
class VerifyCommandsTestCase(test.TestCase):
def setUp(self):
super(VerifyCommandsTestCase, self).setUp()
self.verify = verify.VerifyCommands()
self.image1 = mock.Mock()
self.image1.name = "cirros-1"
self.image1.id = "fake_image_id_1"
self.image2 = mock.Mock()
self.image2.id = "fake_image_id_2"
self.image2.name = "cirros-2"
self.flavor1 = mock.Mock()
self.flavor2 = mock.Mock()
self.flavor1.id = "fake_flavor_id_1"
self.flavor2.id = "fake_flavor_id_2"
self.flavor1.ram = 128
self.flavor2.ram = 64
@mock.patch("rally.osclients.Clients")
@mock.patch("rally.api.Verification.verify")
def test_start(self, mock_verification_verify, mock_clients):
deployment_id = "0fba91c6-82d5-4ce1-bd00-5d7c989552d9"
mock_clients().glance().images.list.return_value = [
self.image1, self.image2]
mock_clients().nova().flavors.list.return_value = [
self.flavor1, self.flavor2]
self.verify.start(deployment=deployment_id, do_use=False)
default_set_name = "full"
default_regex = None
mock_verification_verify.assert_called_once_with(
deployment_id, default_set_name, default_regex, None, False)
@mock.patch("rally.osclients.Clients")
@mock.patch("rally.api.Verification.verify")
def test_start_with_user_specified_tempest_config(
self, mock_verification_verify, mock_clients):
deployment_id = "0fba91c6-82d5-4ce1-bd00-5d7c989552d9"
mock_clients().glance().images.list.return_value = [
self.image1, self.image2]
mock_clients().nova().flavors.list.return_value = [
self.flavor1, self.flavor2]
tempest_config = tempfile.NamedTemporaryFile()
self.verify.start(deployment=deployment_id,
tempest_config=tempest_config.name, do_use=False)
default_set_name = "full"
default_regex = None
mock_verification_verify.assert_called_once_with(
deployment_id, default_set_name, default_regex,
tempest_config.name, False)
tempest_config.close()
@mock.patch("rally.api.Verification.verify")
def test_start_with_wrong_set_name(self, mock_verification_verify):
deployment_id = "f2009aae-6ef3-468e-96b2-3c987d584010"
wrong_set_name = "unexpected_value"
self.verify.start(deployment_id, wrong_set_name, do_use=False)
self.assertNotIn(wrong_set_name, consts.TempestTestsSets,
consts.TempestTestsAPI)
self.assertFalse(mock_verification_verify.called)
@mock.patch("rally.api.Verification.import_file")
def test_import_file(self, mock_verification_import_file):
deployment_id = "fake_uuid"
mock_verification_import_file.return_value = (None, None)
self.verify.import_file(deployment=deployment_id, do_use=False)
default_set_name = None
default_log_file = None
mock_verification_import_file.assert_called_once_with(
deployment_id, default_set_name, default_log_file)
@mock.patch("rally.api.Verification.import_file")
def test_import_file_without_defaults(self, mock_verification_import_file):
deployment_id = "fake_uuid"
set_name = "fake_set_name"
log_file = "fake_log_file"
mock_verification_import_file.return_value = (None, None)
self.verify.import_file(deployment=deployment_id, set_name=set_name,
log_file=log_file, do_use=False)
mock_verification_import_file.assert_called_once_with(
deployment_id, set_name, log_file)
@mock.patch("rally.cli.cliutils.print_list")
@mock.patch("rally.common.db.verification_list")
def test_list(self, mock_common_db_verification_list, mock_print_list):
fields = ["UUID", "Deployment UUID", "Set name", "Tests", "Failures",
"Created at", "Duration", "Status"]
verifications = [{"created_at": date.datetime.now(),
"updated_at": date.datetime.now()}]
mock_common_db_verification_list.return_value = verifications
self.verify.list()
for row in verifications:
self.assertEqual(row["updated_at"] - row["created_at"],
row["duration"])
mock_common_db_verification_list.assert_called_once_with()
mock_print_list.assert_called_once_with(verifications, fields,
sortby_index=fields.index(
"Created at"))
@mock.patch("rally.cli.cliutils.print_list")
@mock.patch("rally.common.db.verification_get")
@mock.patch("rally.common.db.verification_result_get")
@mock.patch("rally.common.objects.Verification")
def test_show(self, mock_objects_verification,
mock_verification_result_get, mock_verification_get,
mock_print_list):
class Test_dummy():
data = {"test_cases": {"test_a": {"name": "test_a", "time": 20,
"status": "PASS"},
"test_b": {"name": "test_b", "time": 20,
"status": "SKIP"},
"test_c": {"name": "test_c", "time": 20,
"status": "FAIL"}}}
verification_id = "39121186-b9a4-421d-b094-6c6b270cf9e9"
total_fields = ["UUID", "Deployment UUID", "Set name", "Tests",
"Failures", "Created at", "Status"]
fields = ["name", "time", "status"]
verification = mock.MagicMock()
tests = Test_dummy()
mock_verification_result_get.return_value = tests
mock_verification_get.return_value = verification
mock_objects_verification.return_value = 1
values = [objects.Verification(t)
for t in six.itervalues(tests.data["test_cases"])]
self.verify.show(verification_id)
self.assertEqual([mock.call([verification], fields=total_fields),
mock.call(values, fields, sortby_index=0)],
mock_print_list.call_args_list)
mock_verification_get.assert_called_once_with(verification_id)
mock_verification_result_get.assert_called_once_with(verification_id)
@mock.patch("rally.common.db.verification_result_get",
return_value={"data": {}})
@mock.patch("json.dumps")
def test_results(self, mock_json_dumps, mock_verification_result_get):
verification_uuid = "a0231bdf-6a4e-4daf-8ab1-ae076f75f070"
self.verify.results(verification_uuid, output_html=False,
output_json=True)
mock_verification_result_get.assert_called_once_with(verification_uuid)
mock_json_dumps.assert_called_once_with({}, sort_keys=True, indent=4)
@mock.patch("rally.common.db.verification_result_get")
def test_results_verification_not_found(
self, mock_verification_result_get):
verification_uuid = "9044ced5-9c84-4666-8a8f-4b73a2b62acb"
mock_verification_result_get.side_effect = (
exceptions.NotFoundException()
)
self.assertEqual(self.verify.results(verification_uuid,
output_html=False,
output_json=True), 1)
mock_verification_result_get.assert_called_once_with(verification_uuid)
@mock.patch("rally.cli.commands.verify.open",
side_effect=mock.mock_open(), create=True)
@mock.patch("rally.common.db.verification_result_get",
return_value={"data": {}})
def test_results_with_output_json_and_output_file(
self, mock_verification_result_get, mock_open):
mock_open.side_effect = mock.mock_open()
verification_uuid = "94615cd4-ff45-4123-86bd-4b0741541d09"
self.verify.results(verification_uuid, output_file="results",
output_html=False, output_json=True)
mock_verification_result_get.assert_called_once_with(verification_uuid)
mock_open.assert_called_once_with("results", "wb")
mock_open.side_effect().write.assert_called_once_with("{}")
@mock.patch("rally.cli.commands.verify.open",
side_effect=mock.mock_open(), create=True)
@mock.patch("rally.common.db.verification_result_get")
@mock.patch("rally.verification.tempest.json2html.HtmlOutput")
def test_results_with_output_html_and_output_file(
self, mock_html_output, mock_verification_result_get, mock_open):
verification_uuid = "7140dd59-3a7b-41fd-a3ef-5e3e615d7dfa"
fake_data = {}
results = {"data": fake_data}
mock_verification_result_get.return_value = results
mock_create = mock.Mock(return_value="html_report")
mock_html_output.return_value = mock.Mock(create_report=mock_create)
self.verify.results(verification_uuid, output_html=True,
output_json=False, output_file="results")
mock_verification_result_get.assert_called_once_with(verification_uuid)
mock_html_output.assert_called_once_with(fake_data)
mock_open.assert_called_once_with("results", "wb")
mock_open.side_effect().write.assert_called_once_with("html_report")
@mock.patch("rally.common.db.verification_result_get",
return_value={"data": {"test_cases": {}}})
@mock.patch("json.dumps")
def test_compare(self, mock_json_dumps, mock_verification_result_get):
uuid1 = "8eda1b10-c8a4-4316-9603-8468ff1d1560"
uuid2 = "f6ef0a98-1b18-452f-a6a7-922555c2e326"
self.verify.compare(uuid1, uuid2, output_csv=False, output_html=False,
output_json=True)
fake_data = []
calls = [mock.call(uuid1),
mock.call(uuid2)]
mock_verification_result_get.assert_has_calls(calls, True)
mock_json_dumps.assert_called_once_with(fake_data, sort_keys=True,
indent=4)
@mock.patch("rally.common.db.verification_result_get",
side_effect=exceptions.NotFoundException())
def test_compare_verification_not_found(self,
mock_verification_result_get):
uuid1 = "f7dc82da-31a6-4d40-bbf8-6d366d58960f"
uuid2 = "2f8a05f3-d310-4f02-aabf-e1165aaa5f9c"
self.assertEqual(self.verify.compare(uuid1, uuid2, output_csv=False,
output_html=False,
output_json=True), 1)
mock_verification_result_get.assert_called_once_with(uuid1)
@mock.patch("rally.cli.commands.verify.open",
side_effect=mock.mock_open(), create=True)
@mock.patch("rally.common.db.verification_result_get",
return_value={"data": {"test_cases": {}}})
def test_compare_with_output_csv_and_output_file(
self, mock_verification_result_get, mock_open):
fake_string = "Type,Field,Value 1,Value 2,Test Name\r\n"
uuid1 = "5e744557-4c3a-414f-9afb-7d3d8708028f"
uuid2 = "efe1c74d-a632-476e-bb6a-55a9aa9cf76b"
self.verify.compare(uuid1, uuid2, output_file="results",
output_csv=True, output_html=False,
output_json=False)
calls = [mock.call(uuid1),
mock.call(uuid2)]
mock_verification_result_get.assert_has_calls(calls, True)
mock_open.assert_called_once_with("results", "wb")
mock_open.side_effect().write.assert_called_once_with(fake_string)
@mock.patch("rally.cli.commands.verify.open",
side_effect=mock.mock_open(), create=True)
@mock.patch("rally.common.db.verification_result_get",
return_value={"data": {"test_cases": {}}})
def test_compare_with_output_json_and_output_file(
self, mock_verification_result_get, mock_open):
fake_json_string = "[]"
uuid1 = "0505e33a-738d-4474-a611-9db21547d863"
uuid2 = "b1908417-934e-481c-8d23-bc0badad39ed"
self.verify.compare(uuid1, uuid2, output_file="results",
output_csv=False, output_html=False,
output_json=True)
calls = [mock.call(uuid1),
mock.call(uuid2)]
mock_verification_result_get.assert_has_calls(calls, True)
mock_open.assert_called_once_with("results", "wb")
mock_open.side_effect().write.assert_called_once_with(fake_json_string)
@mock.patch("rally.cli.commands.verify.open",
side_effect=mock.mock_open(), create=True)
@mock.patch("rally.common.db.verification_result_get")
@mock.patch(("rally.verification.tempest."
"compare2html.create_report"), return_value="")
def test_compare_with_output_html_and_output_file(
self, mock_compare2html_create_report,
mock_verification_result_get, mock_open):
uuid1 = "cdf64228-77e9-414d-9d4b-f65e9d62c61f"
uuid2 = "39393eec-1b45-4103-8ec1-631edac4b8f0"
results = {"data": {"test_cases": {}}}
fake_data = []
self.verify.compare(uuid1, uuid2,
output_file="results",
output_csv=False, output_html=True,
output_json=False)
mock_verification_result_get.return_value = results
calls = [mock.call(uuid1),
mock.call(uuid2)]
mock_verification_result_get.assert_has_calls(calls, True)
mock_compare2html_create_report.assert_called_once_with(fake_data)
mock_open.assert_called_once_with("results", "wb")
mock_open.side_effect().write.assert_called_once_with("")
@mock.patch("rally.common.fileutils._rewrite_env_file")
@mock.patch("rally.cli.commands.verify.db.verification_get",
return_value=True)
def test_use(self, mock_verification_get, mock__rewrite_env_file):
verification_id = "80422553-5774-44bd-98ac-38bd8c7a0feb"
self.verify.use(verification_id)
mock__rewrite_env_file.assert_called_once_with(
os.path.expanduser("~/.rally/globals"),
["RALLY_VERIFICATION=%s\n" % verification_id])
@mock.patch("rally.cli.commands.verify.db.verification_get")
def test_use_not_found(self, mock_verification_get):
verification_id = "ddc3f8ba-082a-496d-b18f-72cdf5c10a14"
mock_verification_get.side_effect = exceptions.NotFoundException(
uuid=verification_id)
self.assertRaises(exceptions.NotFoundException, self.verify.use,
verification_id)
@mock.patch("rally.api.Verification.configure_tempest")
def test_genconfig(self, mock_verification_configure_tempest):
deployment_id = "14377d10-ca77-4104-aba8-36edebcfc120"
self.verify.genconfig(deployment_id)
mock_verification_configure_tempest.assert_called_once_with(
deployment_id, None, False)
@mock.patch("rally.api.Verification.configure_tempest")
def test_genconfig_with_config_specified(
self, mock_verification_configure_tempest):
deployment_id = "68b501af-a553-431c-83ac-30f93a112231"
tempest_conf = "/tmp/tempest.conf"
self.verify.genconfig(deployment_id, tempest_config=tempest_conf)
mock_verification_configure_tempest.assert_called_once_with(
deployment_id, tempest_conf, False)
@mock.patch("rally.api.Verification.configure_tempest")
def test_genconfig_override_config(
self, mock_verification_configure_tempest):
deployment_id = "cd5b64ad-c12f-4781-a89e-95535b145a11"
self.verify.genconfig(deployment_id, override=True)
mock_verification_configure_tempest.assert_called_once_with(
deployment_id, None, True)
@mock.patch("rally.api.Verification.configure_tempest")
def test_genconfig_with_config_specified_and_override_config(
self, mock_verification_configure_tempest):
deployment_id = "89982aba-efef-48cb-8d94-ca893b4e78a6"
tempest_conf = "/tmp/tempest.conf"
self.verify.genconfig(deployment_id,
tempest_config=tempest_conf, override=True)
mock_verification_configure_tempest.assert_called_once_with(
deployment_id, tempest_conf, True)
@mock.patch("rally.api.Verification.install_tempest")
def test_install(self, mock_verification_install_tempest):
deployment_uuid = "d26ebebc-3a5f-4d0d-9021-0c883bd560f5"
self.verify.install(deployment_uuid)
mock_verification_install_tempest.assert_called_once_with(
deployment_uuid, None)
@mock.patch("rally.api.Verification.install_tempest")
def test_install_with_source_specified(
self, mock_verification_install_tempest):
deployment_uuid = "83514de2-a770-4e28-82dd-2826b725e733"
source = "/tmp/tempest"
self.verify.install(deployment_uuid, source)
mock_verification_install_tempest.assert_called_once_with(
deployment_uuid, source)
@mock.patch("rally.api.Verification.uninstall_tempest")
def test_uninstall(self, mock_verification_uninstall_tempest):
deployment_uuid = "f92e7cb2-9fc7-43d4-a86e-8c924b025404"
self.verify.uninstall(deployment_uuid)
mock_verification_uninstall_tempest.assert_called_once_with(
deployment_uuid)
@mock.patch("rally.api.Verification.reinstall_tempest")
def test_reinstall(self, mock_verification_reinstall_tempest):
deployment_uuid = "05e0879b-9150-4e42-b6a0-3c6e48197cc1"
self.verify.reinstall(deployment_uuid)
mock_verification_reinstall_tempest.assert_called_once_with(
deployment_uuid, None, None)
@mock.patch("rally.api.Verification.reinstall_tempest")
def test_reinstall_with_config_specified(
self, mock_verification_reinstall_tempest):
deployment_uuid = "83514de2-a770-4e28-82dd-2826b725e733"
tempest_conf = "/tmp/tempest.conf"
self.verify.reinstall(deployment_uuid, tempest_config=tempest_conf)
mock_verification_reinstall_tempest.assert_called_once_with(
deployment_uuid, tempest_conf, None)
@mock.patch("rally.api.Verification.reinstall_tempest")
def test_reinstall_with_source_specified(
self, mock_verification_reinstall_tempest):
deployment_uuid = "9de60506-8c7a-409f-9ea6-2900f674532d"
source = "/tmp/tempest"
self.verify.reinstall(deployment_uuid, source=source)
mock_verification_reinstall_tempest.assert_called_once_with(
deployment_uuid, None, source)
@mock.patch("rally.api.Verification.reinstall_tempest")
def test_reinstall_with_config_and_source_specified(
self, mock_verification_reinstall_tempest):
deployment_uuid = "f71fb1e2-c442-4889-aaf8-69754828f5f0"
tempest_conf = "/tmp/tempest.conf"
source = "/tmp/tempest"
self.verify.reinstall(deployment_uuid, tempest_conf, source)
mock_verification_reinstall_tempest.assert_called_once_with(
deployment_uuid, tempest_conf, source)
|
jmerkow/VTK
|
refs/heads/master
|
ThirdParty/Twisted/twisted/conch/test/test_recvline.py
|
42
|
# -*- test-case-name: twisted.conch.test.test_recvline -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.conch.recvline} and fixtures for testing related
functionality.
"""
import sys, os
from twisted.conch.insults import insults
from twisted.conch import recvline
from twisted.python import reflect, components
from twisted.internet import defer, error
from twisted.trial import unittest
from twisted.cred import portal
from twisted.test.proto_helpers import StringTransport
class Arrows(unittest.TestCase):
def setUp(self):
self.underlyingTransport = StringTransport()
self.pt = insults.ServerProtocol()
self.p = recvline.HistoricRecvLine()
self.pt.protocolFactory = lambda: self.p
self.pt.factory = self
self.pt.makeConnection(self.underlyingTransport)
# self.p.makeConnection(self.pt)
def test_printableCharacters(self):
"""
When L{HistoricRecvLine} receives a printable character,
it adds it to the current line buffer.
"""
self.p.keystrokeReceived('x', None)
self.p.keystrokeReceived('y', None)
self.p.keystrokeReceived('z', None)
self.assertEqual(self.p.currentLineBuffer(), ('xyz', ''))
def test_horizontalArrows(self):
"""
        When L{HistoricRecvLine} receives a LEFT_ARROW or
RIGHT_ARROW keystroke it moves the cursor left or right
in the current line buffer, respectively.
"""
kR = lambda ch: self.p.keystrokeReceived(ch, None)
for ch in 'xyz':
kR(ch)
self.assertEqual(self.p.currentLineBuffer(), ('xyz', ''))
kR(self.pt.RIGHT_ARROW)
self.assertEqual(self.p.currentLineBuffer(), ('xyz', ''))
kR(self.pt.LEFT_ARROW)
self.assertEqual(self.p.currentLineBuffer(), ('xy', 'z'))
kR(self.pt.LEFT_ARROW)
self.assertEqual(self.p.currentLineBuffer(), ('x', 'yz'))
kR(self.pt.LEFT_ARROW)
self.assertEqual(self.p.currentLineBuffer(), ('', 'xyz'))
kR(self.pt.LEFT_ARROW)
self.assertEqual(self.p.currentLineBuffer(), ('', 'xyz'))
kR(self.pt.RIGHT_ARROW)
self.assertEqual(self.p.currentLineBuffer(), ('x', 'yz'))
kR(self.pt.RIGHT_ARROW)
self.assertEqual(self.p.currentLineBuffer(), ('xy', 'z'))
kR(self.pt.RIGHT_ARROW)
self.assertEqual(self.p.currentLineBuffer(), ('xyz', ''))
kR(self.pt.RIGHT_ARROW)
self.assertEqual(self.p.currentLineBuffer(), ('xyz', ''))
def test_newline(self):
"""
        When L{HistoricRecvLine} receives a newline, it adds the current
line buffer to the end of its history buffer.
"""
kR = lambda ch: self.p.keystrokeReceived(ch, None)
for ch in 'xyz\nabc\n123\n':
kR(ch)
self.assertEqual(self.p.currentHistoryBuffer(),
(('xyz', 'abc', '123'), ()))
kR('c')
kR('b')
kR('a')
self.assertEqual(self.p.currentHistoryBuffer(),
(('xyz', 'abc', '123'), ()))
kR('\n')
self.assertEqual(self.p.currentHistoryBuffer(),
(('xyz', 'abc', '123', 'cba'), ()))
def test_verticalArrows(self):
"""
When L{HistoricRecvLine} receives UP_ARROW or DOWN_ARROW
        keystrokes it moves the current index in the current history
        buffer up or down, and resets the current line buffer to the
        previous or next line in history, respectively.
"""
kR = lambda ch: self.p.keystrokeReceived(ch, None)
for ch in 'xyz\nabc\n123\n':
kR(ch)
self.assertEqual(self.p.currentHistoryBuffer(),
(('xyz', 'abc', '123'), ()))
self.assertEqual(self.p.currentLineBuffer(), ('', ''))
kR(self.pt.UP_ARROW)
self.assertEqual(self.p.currentHistoryBuffer(),
(('xyz', 'abc'), ('123',)))
self.assertEqual(self.p.currentLineBuffer(), ('123', ''))
kR(self.pt.UP_ARROW)
self.assertEqual(self.p.currentHistoryBuffer(),
(('xyz',), ('abc', '123')))
self.assertEqual(self.p.currentLineBuffer(), ('abc', ''))
kR(self.pt.UP_ARROW)
self.assertEqual(self.p.currentHistoryBuffer(),
((), ('xyz', 'abc', '123')))
self.assertEqual(self.p.currentLineBuffer(), ('xyz', ''))
kR(self.pt.UP_ARROW)
self.assertEqual(self.p.currentHistoryBuffer(),
((), ('xyz', 'abc', '123')))
self.assertEqual(self.p.currentLineBuffer(), ('xyz', ''))
for i in range(4):
kR(self.pt.DOWN_ARROW)
self.assertEqual(self.p.currentHistoryBuffer(),
(('xyz', 'abc', '123'), ()))
def test_home(self):
"""
When L{HistoricRecvLine} receives a HOME keystroke it moves the
cursor to the beginning of the current line buffer.
"""
kR = lambda ch: self.p.keystrokeReceived(ch, None)
for ch in 'hello, world':
kR(ch)
self.assertEqual(self.p.currentLineBuffer(), ('hello, world', ''))
kR(self.pt.HOME)
self.assertEqual(self.p.currentLineBuffer(), ('', 'hello, world'))
def test_end(self):
"""
        When L{HistoricRecvLine} receives an END keystroke it moves the cursor
to the end of the current line buffer.
"""
kR = lambda ch: self.p.keystrokeReceived(ch, None)
for ch in 'hello, world':
kR(ch)
self.assertEqual(self.p.currentLineBuffer(), ('hello, world', ''))
kR(self.pt.HOME)
kR(self.pt.END)
self.assertEqual(self.p.currentLineBuffer(), ('hello, world', ''))
def test_backspace(self):
"""
When L{HistoricRecvLine} receives a BACKSPACE keystroke it deletes
the character immediately before the cursor.
"""
kR = lambda ch: self.p.keystrokeReceived(ch, None)
for ch in 'xyz':
kR(ch)
self.assertEqual(self.p.currentLineBuffer(), ('xyz', ''))
kR(self.pt.BACKSPACE)
self.assertEqual(self.p.currentLineBuffer(), ('xy', ''))
kR(self.pt.LEFT_ARROW)
kR(self.pt.BACKSPACE)
self.assertEqual(self.p.currentLineBuffer(), ('', 'y'))
kR(self.pt.BACKSPACE)
self.assertEqual(self.p.currentLineBuffer(), ('', 'y'))
def test_delete(self):
"""
When L{HistoricRecvLine} receives a DELETE keystroke, it
        deletes the character immediately after the cursor.
"""
kR = lambda ch: self.p.keystrokeReceived(ch, None)
for ch in 'xyz':
kR(ch)
self.assertEqual(self.p.currentLineBuffer(), ('xyz', ''))
kR(self.pt.DELETE)
self.assertEqual(self.p.currentLineBuffer(), ('xyz', ''))
kR(self.pt.LEFT_ARROW)
kR(self.pt.DELETE)
self.assertEqual(self.p.currentLineBuffer(), ('xy', ''))
kR(self.pt.LEFT_ARROW)
kR(self.pt.DELETE)
self.assertEqual(self.p.currentLineBuffer(), ('x', ''))
kR(self.pt.LEFT_ARROW)
kR(self.pt.DELETE)
self.assertEqual(self.p.currentLineBuffer(), ('', ''))
kR(self.pt.DELETE)
self.assertEqual(self.p.currentLineBuffer(), ('', ''))
def test_insert(self):
"""
When not in INSERT mode, L{HistoricRecvLine} inserts the typed
character at the cursor before the next character.
"""
kR = lambda ch: self.p.keystrokeReceived(ch, None)
for ch in 'xyz':
kR(ch)
kR(self.pt.LEFT_ARROW)
kR('A')
self.assertEqual(self.p.currentLineBuffer(), ('xyA', 'z'))
kR(self.pt.LEFT_ARROW)
kR('B')
self.assertEqual(self.p.currentLineBuffer(), ('xyB', 'Az'))
def test_typeover(self):
"""
When in INSERT mode and upon receiving a keystroke with a printable
character, L{HistoricRecvLine} replaces the character at
the cursor with the typed character rather than inserting before.
Ah, the ironies of INSERT mode.
"""
kR = lambda ch: self.p.keystrokeReceived(ch, None)
for ch in 'xyz':
kR(ch)
kR(self.pt.INSERT)
kR(self.pt.LEFT_ARROW)
kR('A')
self.assertEqual(self.p.currentLineBuffer(), ('xyA', ''))
kR(self.pt.LEFT_ARROW)
kR('B')
self.assertEqual(self.p.currentLineBuffer(), ('xyB', ''))
def test_unprintableCharacters(self):
"""
When L{HistoricRecvLine} receives a keystroke for an unprintable
function key with no assigned behavior, the line buffer is unmodified.
"""
kR = lambda ch: self.p.keystrokeReceived(ch, None)
pt = self.pt
for ch in (pt.F1, pt.F2, pt.F3, pt.F4, pt.F5, pt.F6, pt.F7, pt.F8,
pt.F9, pt.F10, pt.F11, pt.F12, pt.PGUP, pt.PGDN):
kR(ch)
self.assertEqual(self.p.currentLineBuffer(), ('', ''))
from twisted.conch import telnet
from twisted.conch.insults import helper
from twisted.protocols import loopback
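# Minimal HistoricRecvLine server used by the loopback tests: echo each completed line,
# then print a fresh prompt.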
class EchoServer(recvline.HistoricRecvLine):
def lineReceived(self, line):
self.terminal.write(line + '\n' + self.ps[self.pn])
# An insults API for this would be nice.
left = "\x1b[D"
right = "\x1b[C"
up = "\x1b[A"
down = "\x1b[B"
insert = "\x1b[2~"
home = "\x1b[1~"
delete = "\x1b[3~"
end = "\x1b[4~"
backspace = "\x7f"
from twisted.cred import checkers
try:
from twisted.conch.ssh import userauth, transport, channel, connection, session
from twisted.conch.manhole_ssh import TerminalUser, TerminalSession, TerminalRealm, TerminalSessionTransport, ConchFactory
except ImportError:
ssh = False
else:
ssh = True
class SessionChannel(channel.SSHChannel):
name = 'session'
def __init__(self, protocolFactory, protocolArgs, protocolKwArgs, width, height, *a, **kw):
channel.SSHChannel.__init__(self, *a, **kw)
self.protocolFactory = protocolFactory
self.protocolArgs = protocolArgs
self.protocolKwArgs = protocolKwArgs
self.width = width
self.height = height
def channelOpen(self, data):
term = session.packRequest_pty_req("vt102", (self.height, self.width, 0, 0), '')
self.conn.sendRequest(self, 'pty-req', term)
self.conn.sendRequest(self, 'shell', '')
self._protocolInstance = self.protocolFactory(*self.protocolArgs, **self.protocolKwArgs)
self._protocolInstance.factory = self
self._protocolInstance.makeConnection(self)
def closed(self):
self._protocolInstance.connectionLost(error.ConnectionDone())
def dataReceived(self, data):
self._protocolInstance.dataReceived(data)
class TestConnection(connection.SSHConnection):
def __init__(self, protocolFactory, protocolArgs, protocolKwArgs, width, height, *a, **kw):
connection.SSHConnection.__init__(self, *a, **kw)
self.protocolFactory = protocolFactory
self.protocolArgs = protocolArgs
self.protocolKwArgs = protocolKwArgs
self.width = width
self.height = height
def serviceStarted(self):
self.__channel = SessionChannel(self.protocolFactory, self.protocolArgs, self.protocolKwArgs, self.width, self.height)
self.openChannel(self.__channel)
def write(self, bytes):
return self.__channel.write(bytes)
class TestAuth(userauth.SSHUserAuthClient):
def __init__(self, username, password, *a, **kw):
userauth.SSHUserAuthClient.__init__(self, username, *a, **kw)
self.password = password
def getPassword(self):
return defer.succeed(self.password)
class TestTransport(transport.SSHClientTransport):
def __init__(self, protocolFactory, protocolArgs, protocolKwArgs, username, password, width, height, *a, **kw):
# transport.SSHClientTransport.__init__(self, *a, **kw)
self.protocolFactory = protocolFactory
self.protocolArgs = protocolArgs
self.protocolKwArgs = protocolKwArgs
self.username = username
self.password = password
self.width = width
self.height = height
def verifyHostKey(self, hostKey, fingerprint):
return defer.succeed(True)
def connectionSecure(self):
self.__connection = TestConnection(self.protocolFactory, self.protocolArgs, self.protocolKwArgs, self.width, self.height)
self.requestService(
TestAuth(self.username, self.password, self.__connection))
def write(self, bytes):
return self.__connection.write(bytes)
class TestSessionTransport(TerminalSessionTransport):
def protocolFactory(self):
return self.avatar.conn.transport.factory.serverProtocol()
class TestSession(TerminalSession):
transportFactory = TestSessionTransport
class TestUser(TerminalUser):
pass
components.registerAdapter(TestSession, TestUser, session.ISession)
class LoopbackRelay(loopback.LoopbackRelay):
clearCall = None
def logPrefix(self):
return "LoopbackRelay(%r)" % (self.target.__class__.__name__,)
def write(self, bytes):
loopback.LoopbackRelay.write(self, bytes)
if self.clearCall is not None:
self.clearCall.cancel()
from twisted.internet import reactor
self.clearCall = reactor.callLater(0, self._clearBuffer)
def _clearBuffer(self):
self.clearCall = None
loopback.LoopbackRelay.clearBuffer(self)
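# An ExpectableBuffer that fires Deferreds when its connection is made or lost, so tests
# can wait for the terminal to become ready before (and after) exercising it.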
class NotifyingExpectableBuffer(helper.ExpectableBuffer):
def __init__(self):
self.onConnection = defer.Deferred()
self.onDisconnection = defer.Deferred()
def connectionMade(self):
helper.ExpectableBuffer.connectionMade(self)
self.onConnection.callback(self)
def connectionLost(self, reason):
self.onDisconnection.errback(reason)
class _BaseMixin:
WIDTH = 80
HEIGHT = 24
def _assertBuffer(self, lines):
receivedLines = str(self.recvlineClient).splitlines()
expectedLines = lines + ([''] * (self.HEIGHT - len(lines) - 1))
self.assertEqual(len(receivedLines), len(expectedLines))
for i in range(len(receivedLines)):
self.assertEqual(
receivedLines[i], expectedLines[i],
str(receivedLines[max(0, i-1):i+1]) +
" != " +
str(expectedLines[max(0, i-1):i+1]))
def _trivialTest(self, input, output):
done = self.recvlineClient.expect("done")
self._testwrite(input)
def finished(ign):
self._assertBuffer(output)
return done.addCallback(finished)
class _SSHMixin(_BaseMixin):
def setUp(self):
if not ssh:
raise unittest.SkipTest("Crypto requirements missing, can't run historic recvline tests over ssh")
u, p = 'testuser', 'testpass'
rlm = TerminalRealm()
rlm.userFactory = TestUser
rlm.chainedProtocolFactory = lambda: insultsServer
ptl = portal.Portal(
rlm,
[checkers.InMemoryUsernamePasswordDatabaseDontUse(**{u: p})])
sshFactory = ConchFactory(ptl)
sshFactory.serverProtocol = self.serverProtocol
sshFactory.startFactory()
recvlineServer = self.serverProtocol()
insultsServer = insults.ServerProtocol(lambda: recvlineServer)
sshServer = sshFactory.buildProtocol(None)
clientTransport = LoopbackRelay(sshServer)
recvlineClient = NotifyingExpectableBuffer()
insultsClient = insults.ClientProtocol(lambda: recvlineClient)
sshClient = TestTransport(lambda: insultsClient, (), {}, u, p, self.WIDTH, self.HEIGHT)
serverTransport = LoopbackRelay(sshClient)
sshClient.makeConnection(clientTransport)
sshServer.makeConnection(serverTransport)
self.recvlineClient = recvlineClient
self.sshClient = sshClient
self.sshServer = sshServer
self.clientTransport = clientTransport
self.serverTransport = serverTransport
return recvlineClient.onConnection
def _testwrite(self, bytes):
self.sshClient.write(bytes)
from twisted.conch.test import test_telnet
class TestInsultsClientProtocol(insults.ClientProtocol,
test_telnet.TestProtocol):
pass
class TestInsultsServerProtocol(insults.ServerProtocol,
test_telnet.TestProtocol):
pass
class _TelnetMixin(_BaseMixin):
def setUp(self):
recvlineServer = self.serverProtocol()
insultsServer = TestInsultsServerProtocol(lambda: recvlineServer)
telnetServer = telnet.TelnetTransport(lambda: insultsServer)
clientTransport = LoopbackRelay(telnetServer)
recvlineClient = NotifyingExpectableBuffer()
insultsClient = TestInsultsClientProtocol(lambda: recvlineClient)
telnetClient = telnet.TelnetTransport(lambda: insultsClient)
serverTransport = LoopbackRelay(telnetClient)
telnetClient.makeConnection(clientTransport)
telnetServer.makeConnection(serverTransport)
serverTransport.clearBuffer()
clientTransport.clearBuffer()
self.recvlineClient = recvlineClient
self.telnetClient = telnetClient
self.clientTransport = clientTransport
self.serverTransport = serverTransport
return recvlineClient.onConnection
def _testwrite(self, bytes):
self.telnetClient.write(bytes)
try:
from twisted.conch import stdio
except ImportError:
stdio = None
class _StdioMixin(_BaseMixin):
def setUp(self):
# A memory-only terminal emulator, into which the server will
# write things and make other state changes. What ends up
# here is basically what a user would have seen on their
# screen.
testTerminal = NotifyingExpectableBuffer()
# An insults client protocol which will translate bytes
# received from the child process into keystroke commands for
# an ITerminalProtocol.
insultsClient = insults.ClientProtocol(lambda: testTerminal)
# A process protocol which will translate stdout and stderr
# received from the child process to dataReceived calls and
# error reporting on an insults client protocol.
processClient = stdio.TerminalProcessProtocol(insultsClient)
# Run twisted/conch/stdio.py with the name of a class
# implementing ITerminalProtocol. This class will be used to
# handle bytes we send to the child process.
exe = sys.executable
module = stdio.__file__
if module.endswith('.pyc') or module.endswith('.pyo'):
module = module[:-1]
args = [exe, module, reflect.qual(self.serverProtocol)]
env = os.environ.copy()
env["PYTHONPATH"] = os.pathsep.join(sys.path)
from twisted.internet import reactor
clientTransport = reactor.spawnProcess(processClient, exe, args,
env=env, usePTY=True)
self.recvlineClient = self.testTerminal = testTerminal
self.processClient = processClient
self.clientTransport = clientTransport
# Wait for the process protocol and test terminal to become
# connected before proceeding. The former should always
# happen first, but it doesn't hurt to be safe.
return defer.gatherResults(filter(None, [
processClient.onConnection,
testTerminal.expect(">>> ")]))
def tearDown(self):
# Kill the child process. We're done with it.
try:
self.clientTransport.signalProcess("KILL")
except (error.ProcessExitedAlready, OSError):
pass
def trap(failure):
failure.trap(error.ProcessTerminated)
self.assertEqual(failure.value.exitCode, None)
self.assertEqual(failure.value.status, 9)
return self.testTerminal.onDisconnection.addErrback(trap)
def _testwrite(self, bytes):
self.clientTransport.write(bytes)
class RecvlineLoopbackMixin:
serverProtocol = EchoServer
def testSimple(self):
return self._trivialTest(
"first line\ndone",
[">>> first line",
"first line",
">>> done"])
def testLeftArrow(self):
return self._trivialTest(
insert + 'first line' + left * 4 + "xxxx\ndone",
[">>> first xxxx",
"first xxxx",
">>> done"])
def testRightArrow(self):
return self._trivialTest(
insert + 'right line' + left * 4 + right * 2 + "xx\ndone",
[">>> right lixx",
"right lixx",
">>> done"])
def testBackspace(self):
return self._trivialTest(
"second line" + backspace * 4 + "xxxx\ndone",
[">>> second xxxx",
"second xxxx",
">>> done"])
def testDelete(self):
return self._trivialTest(
"delete xxxx" + left * 4 + delete * 4 + "line\ndone",
[">>> delete line",
"delete line",
">>> done"])
def testInsert(self):
return self._trivialTest(
"third ine" + left * 3 + "l\ndone",
[">>> third line",
"third line",
">>> done"])
def testTypeover(self):
return self._trivialTest(
"fourth xine" + left * 4 + insert + "l\ndone",
[">>> fourth line",
"fourth line",
">>> done"])
def testHome(self):
return self._trivialTest(
insert + "blah line" + home + "home\ndone",
[">>> home line",
"home line",
">>> done"])
def testEnd(self):
return self._trivialTest(
"end " + left * 4 + end + "line\ndone",
[">>> end line",
"end line",
">>> done"])
class RecvlineLoopbackTelnet(_TelnetMixin, unittest.TestCase, RecvlineLoopbackMixin):
pass
class RecvlineLoopbackSSH(_SSHMixin, unittest.TestCase, RecvlineLoopbackMixin):
pass
class RecvlineLoopbackStdio(_StdioMixin, unittest.TestCase, RecvlineLoopbackMixin):
if stdio is None:
skip = "Terminal requirements missing, can't run recvline tests over stdio"
class HistoricRecvlineLoopbackMixin:
serverProtocol = EchoServer
def testUpArrow(self):
return self._trivialTest(
"first line\n" + up + "\ndone",
[">>> first line",
"first line",
">>> first line",
"first line",
">>> done"])
def testDownArrow(self):
return self._trivialTest(
"first line\nsecond line\n" + up * 2 + down + "\ndone",
[">>> first line",
"first line",
">>> second line",
"second line",
">>> second line",
"second line",
">>> done"])
class HistoricRecvlineLoopbackTelnet(_TelnetMixin, unittest.TestCase, HistoricRecvlineLoopbackMixin):
pass
class HistoricRecvlineLoopbackSSH(_SSHMixin, unittest.TestCase, HistoricRecvlineLoopbackMixin):
pass
class HistoricRecvlineLoopbackStdio(_StdioMixin, unittest.TestCase, HistoricRecvlineLoopbackMixin):
if stdio is None:
skip = "Terminal requirements missing, can't run historic recvline tests over stdio"
|
prodromou87/gem5
|
refs/heads/master
|
src/arch/arm/ArmNativeTrace.py
|
27
|
# Copyright (c) 2009 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
from m5.SimObject import SimObject
from m5.params import *
from NativeTrace import NativeTrace
class ArmNativeTrace(NativeTrace):
type = 'ArmNativeTrace'
cxx_class = 'Trace::ArmNativeTrace'
cxx_header = "arch/arm/nativetrace.hh"
stop_on_pc_error = Param.Bool(True,
"Stop M5 if it and statetrace's pcs are different")
|
jakevdp/pelican-plugins
|
refs/heads/master
|
libravatar/test_libravatar.py
|
52
|
"""Unit testing suite for the Libravatar Plugin"""
## Copyright (C) 2015 Rafael Laboissiere <rafael@laboissiere.net>
##
## This program is free software: you can redistribute it and/or modify it
## under the terms of the GNU Affero General Public License as published by
## the Free Software Foundation, either version 3 of the License, or (at
## your option) any later version.
##
## This program is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Affero General Public License for more details.
##
## You should have received a copy of the GNU Affero General Public License
## along with this program. If not, see http://www.gnu.org/licenses/.
import os
import re
import unittest
import hashlib
from tempfile import mkdtemp
from shutil import rmtree
from . import libravatar
from pelican import Pelican
from pelican.settings import read_settings
AUTHOR_EMAIL = 'bart.simpson@example.com'
MD5_HASH = hashlib.md5 (AUTHOR_EMAIL).hexdigest ()
LIBRAVATAR_BASE_URL = 'http://cdn.libravatar.org/avatar/'
class TestLibravatarURL (unittest.TestCase):
"""Class for testing the URL output of the Libravatar plugin"""
def setUp (self, override = None):
self.output_path = mkdtemp (prefix = 'pelicantests.')
self.content_path = mkdtemp (prefix = 'pelicantests.')
theme_path = os.path.join (os.path.dirname (os.path.abspath (__file__)),
'test_data', 'theme')
settings = {
'PATH': self.content_path,
'THEME': theme_path,
'OUTPUT_PATH': self.output_path,
'PLUGINS': [libravatar],
'CACHE_CONTENT': False
}
if override:
settings.update (override)
fid = open (os.path.join (self.content_path, 'test.md'), 'w')
fid.write ('Title: Test\nDate:\nEmail: ' + AUTHOR_EMAIL + '\n\n')
fid.close ()
self.settings = read_settings (override = settings)
pelican = Pelican (settings = self.settings)
pelican.run ()
def tearDown (self):
rmtree (self.output_path)
rmtree (self.content_path)
def test_url (self, options = ''):
fid = open (os.path.join (self.output_path, 'test.html'), 'r')
found = False
for line in fid.readlines ():
print line
if re.search (LIBRAVATAR_BASE_URL + MD5_HASH + options, line):
found = True
break
assert found
class TestLibravatarMissing (TestLibravatarURL):
"""Class for testing the Libravatar "missing picture" option"""
def setUp (self, override = None):
self.library = 'wavatar'
TestLibravatarURL.setUp (self,
override = {'LIBRAVATAR_MISSING':
self.library})
def test_url (self):
TestLibravatarURL.test_url (self, '\?d=' + self.library)
class TestLibravatarSize (TestLibravatarURL):
"""Class for testing the Libravatar size option"""
def setUp (self, override = None):
self.size = 100
TestLibravatarURL.setUp (self,
override = {'LIBRAVATAR_SIZE': self.size})
def test_url (self):
TestLibravatarURL.test_url (self, '\?s=' + str (self.size))
|
argv0/cloudstack
|
refs/heads/master
|
cloud-cli/cloudtool/__init__.py
|
2
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''
Created on Aug 2, 2010
@author: rudd-o
'''
import sys
import cloudapis as apis
import cloudtool.utils as utils
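# Command-line entry point: resolve the requested command on the cloud API implementor,
# parse API options and command options separately, then invoke the command with the
# remaining positional arguments.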
def main(argv=None):
#import ipdb; ipdb.set_trace()
if argv == None:
argv = sys.argv
prelim_args = [ x for x in argv[0:] if not x.startswith('-') ]
parser = utils.get_parser()
api = __import__("cloudapis")
apis = getattr(api, "implementor")
if len(prelim_args) == 1:
commandlist = utils.get_command_list(apis)
parser.error("you need to specify a command name as the first argument\n\nCommands supported by the %s API:\n"%prelim_args[0] + "\n".join(commandlist))
command = utils.lookup_command_in_api(apis,prelim_args[1])
if not command: parser.error("command %r not supported by the %s API"%(prelim_args[1],prelim_args[0]))
argv = argv[1:]
if len(argv) == 1:
argv.append("--help")
parser = utils.get_parser(apis.__init__,command)
opts,args,api_optionsdict,cmd_optionsdict = parser.parse_args(argv)
try:
api = apis(**api_optionsdict)
except utils.OptParseError,e:
parser.error(str(e))
command = utils.lookup_command_in_api(api,args[0])
# we now discard the first two arguments as those necessarily are the api and command names
args = args[2:]
try: return command(*args,**cmd_optionsdict)
except TypeError,e: parser.error(str(e))
if __name__ == '__main__':
    main()
|
Endika/c2c-rd-addons
|
refs/heads/8.0
|
table_generate_csv/wizard/wizard_generate_csv.py
|
4
|
# -*- coding: utf-8 -*-
##############################################
#
# Swing Entwicklung betrieblicher Informationssysteme GmbH
# (<http://www.swing-system.com>)
# Copyright (C) ChriCar Beteiligungs- und Beratungs- GmbH
# all rights reserved
# 02-SEP-2011 (GK) created
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/> or
# write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
###############################################
import wizard
import pooler
from openerp.osv import osv
from openerp.tools.translate import _
import base64
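# Old-style OpenERP wizard: let the user pick a model, optional record filters and CSV
# delimiters, then store the generated CSV as an ir.attachment on that model.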
class wizard_generate_csv(wizard.interface):
_init_form = \
"""<?xml version="1.0"?>
<form string="Generate CSV files">
<separator colspan="4" string="Generate CSV"/>
<field
name="model_ids"
domain="[('state','=','base')]"
height="200"
width="500"
nolabel="1"/>
</form>
"""
_init_fields = \
{ 'model_ids':
{ 'string' :'Model'
, 'type' :'many2many'
, 'required' : True
, 'relation' : 'ir.model'
}
}
_filter_form = \
"""<?xml version="1.0"?>
<form string="Select Filter">
<field name="attribute" colspan="1" nolabel="1"/>
<field name="compare" colspan="1" nolabel="1"/>
<field name="value" colspan="2" nolabel="1"/>
</form>"""
_filter_fields = \
{ 'attribute':
{ 'string' : 'Attribute'
, 'type' : 'selection'
, 'selection' : []
}
, 'compare':
{ 'string' : 'Comparison'
, 'type' : 'selection'
, 'selection' :
[ ('=','Equal')
, ('!=', 'Not Equal')
, ('>', 'Greater')
, ('>=', 'Greater or Equal')
, ('<', 'Less')
, ('<=', 'Less or Equal')
, ('in', 'In')
]
}
, 'value':
{ 'string' : 'Value'
, 'type' : 'char', 'size': 64
}
}
_delimiter_form = \
"""<?xml version="1.0"?>
<form string="Select Delimiters">
<field name="field_separator" colspan="4"/>
<field name="quote" colspan="4"/>
<field name="decimal_point" colspan="4"/>
<field name="header" colspan="4"/>
</form>"""
_delimiter_fields = \
{ 'field_separator':
{ 'string' : 'Field Separator'
, 'type' : 'char', 'size': 1
, 'required' : True
, 'default' : lambda *a : ","
}
, 'quote':
{ 'string' : 'Quote'
, 'type' : 'char', 'size': 1
, 'required' : True
, 'default' : lambda *a : '"'
}
, 'decimal_point':
{ 'string' : 'Decimal Point'
, 'type' : 'char', 'size': 1
, 'required' : True
, 'default' : lambda *a : "."
}
, 'header':
{ 'string' : 'Include Header'
, 'type' : 'boolean'
, 'required' : True
, 'default' : lambda *a : True
}
}
def _manage_attachments(self, cr, uid, model, text, name, description, context=None):
pool = pooler.get_pool(cr.dbname)
attachment_obj = pool.get('ir.attachment')
title = name.lower().replace(" ", "_")
vals = \
{ 'name' : title
, 'datas' : text
, 'datas_fname' : "%s.csv" % name
, 'res_model' : model._table_name
, 'res_id' : model.id
, 'description' : "%s" % (description, )
}
attachment_obj.create(cr, uid, vals, context=context)
# end def _manage_attachments
def _add_filter(self, form) :
if form and form['attribute'] and form['compare'] :
if self.table_obj._columns[form['attribute']]._type in ("int", "float", "boolean") :
value = form['value'].upper()
else :
value = "'%s'" % form['value']
self._filters.append((form['attribute'], form['compare'], value))
# end def _add_filter
def _generate(self, cr, uid, data, res_get=False) :
pool = pooler.get_pool(cr.dbname)
model_obj = pool.get('ir.model')
if data['model'] == 'ir.model':
model_id = data['ids'][0]
else :
model_id = data['form']['model_ids'][0][2][0]
model = model_obj.browse(cr, uid, model_id)
self.table_obj = pool.get(model.model)
if self.table_obj is not None and not isinstance(self.table_obj, osv.osv_memory) :
self._add_filter(data['form'])
csv = model_obj.generate_csv \
( cr, uid
, self.table_obj
, search = self._filters
, header = data['form']['header']
, field_separator = data['form']['field_separator']
, decimal_point = data['form']['decimal_point']
, quote = data['form']['quote']
, line_separator = "\n"
)
self._manage_attachments \
( cr, uid
, model
, base64.encodestring(csv)
, self.table_obj._name
, " and ".join('"%s" %s %s' % (s[0], s[1], s[2]) for s in self._filters)
)
return {}
# end def _generate
def _filter(self, cr, uid, data, res_get=False) :
pool = pooler.get_pool(cr.dbname)
model_obj = pool.get('ir.model')
if data['model'] == 'ir.model':
model_id = data['ids'][0]
else :
model_id = data['form']['model_ids'][0][2][0]
model = model_obj.browse(cr, uid, model_id)
self.table_obj = pool.get(model.model)
self._filter_fields['attribute']['selection'] = []
if self.table_obj :
for k,v in self.table_obj._columns.iteritems() :
if v._type in ("many2many", "one2many", "related", "function") : continue
if hasattr(v, "_fnct") and v._fnct : continue
self._filter_fields['attribute']['selection'].append((k,k))
return {}
# end def _filter
def _decide(self, cr, uid, data, res_get=False) :
self._filters = []
if data['model'] == 'ir.model':
return 'filter'
else :
return 'form'
# end def _decide
def _decide2(self, cr, uid, data, res_get=False) :
form = data['form']
self._add_filter(form)
return 'filter'
# end def _decide2
states = \
{ 'init' :
{ 'actions' : []
, 'result' :
{ 'type' : 'choice'
, 'next_state' : _decide
}
}
, 'form' :
{ 'actions' : []
, 'result' :
{ 'type' : 'form'
, 'arch' : _init_form
, 'fields' : _init_fields
, 'state' : [('end', 'Cancel'), ('delimiter', 'Select Delimiters'), ('filter', 'Filter')]
}
}
, 'filter' :
{ 'actions' : [_filter]
, 'result' :
{ 'type' : 'form'
, 'arch' : _filter_form
, 'fields' : _filter_fields
, 'state' : [('end', 'Cancel'), ('delimiter', 'Select Delimiters'), ('add_filter', 'Next Filter')]
}
}
, 'add_filter' :
{ 'actions' : []
, 'result' :
{ 'type' : 'choice'
, 'next_state' : _decide2
}
}
, 'delimiter' :
{ 'actions' : []
, 'result' :
{ 'type' : 'form'
, 'arch' : _delimiter_form
, 'fields' : _delimiter_fields
, 'state' : [('end', 'Cancel'), ('generate', 'Generate')]
}
}
, 'generate' :
{ 'actions' : []
, 'result' :
{ 'type' : 'action'
, 'action' : _generate
, 'state' : 'end'
}
}
}
# end class wizard_generate_csv
wizard_generate_csv('ir.model.wizard_generate_csv')
|
caterinaurban/Lyra
|
refs/heads/master
|
src/lyra/unittests/usage/tricky.py
|
1
|
x: bool = bool(input())
# STATE: x -> U; y -> W
y: bool = bool(input())
if x:
x: bool = x and y
y: bool = False
if x:
x: bool = x and y
y: bool = False
print(y)
|
Mark24Code/python
|
refs/heads/master
|
Mr.Lin/0006/0006.py
|
40
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: 30987
# @Date: 2015-01-13 11:11:49
# @Last Modified by: 30987
# @Last Modified time: 2015-01-13 17:10:27
import re
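# Count how often each word occurs in the given text file and print the most frequent one.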
def hot_words(file_path):
file = open(file_path,'r')
file_content = file.read()
p = re.compile(r'[\W\d]*')
word_list = p.split(file_content)
word_dict = {}
for word in word_list:
if word not in word_dict:
word_dict[word] = 1
else:
word_dict[word] += 1
sort = sorted(word_dict.items(), key=lambda e: e[1], reverse=True)
print("The most word in '%s' is '%s',it appears '%s' times" % (file_path,sort[1][0], sort[1][1]))
file.close()
if __name__ == '__main__':
hot_words('test.txt')
|
frouty/odoo_oph
|
refs/heads/dev_70
|
addons/l10n_uk/__openerp__.py
|
90
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2011 Smartmode LTD (<http://www.smartmode.co.uk>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'UK - Accounting',
'version': '1.0',
'category': 'Localization/Account Charts',
'description': """
This is the latest UK OpenERP localisation necessary to run OpenERP accounting for UK SMEs with:
=================================================================================================
- a CT600-ready chart of accounts
- VAT100-ready tax structure
- InfoLogic UK counties listing
- a few other adaptations""",
'author': 'SmartMode LTD',
'website': 'http://www.smartmode.co.uk',
'depends': ['base_iban', 'base_vat', 'account_chart'],
'data': [
'data/account.account.type.csv',
'data/account.account.template.csv',
'data/account.tax.code.template.csv',
'data/account.chart.template.csv',
'data/account.tax.template.csv',
'data/res.country.state.csv',
'l10n_uk_wizard.xml',
],
'demo' : ['demo/demo.xml'],
    'installable': True,
'images': ['images/config_chart_l10n_uk.jpeg','images/l10n_uk_chart.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
tumbl3w33d/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/azure/azure_rm_devtestlabpolicy.py
|
27
|
#!/usr/bin/python
#
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_devtestlabpolicy
version_added: "2.8"
short_description: Manage Azure Policy instance
description:
- Create, update and delete instance of Azure Policy.
options:
resource_group:
description:
- The name of the resource group.
required: True
lab_name:
description:
- The name of the lab.
required: True
policy_set_name:
description:
- The name of the policy set.
required: True
name:
description:
- The name of the policy.
required: True
description:
description:
- The description of the policy.
fact_name:
description:
            - The fact name of the policy (e.g. C(lab_vm_count), C(lab_vm_size), MaxVmsAllowedPerLab, etc.).
choices:
- 'user_owned_lab_vm_count'
- 'user_owned_lab_premium_vm_count'
- 'lab_vm_count'
- 'lab_premium_vm_count'
- 'lab_vm_size'
- 'gallery_image'
- 'user_owned_lab_vm_count_in_subnet'
- 'lab_target_cost'
threshold:
description:
- The threshold of the policy (it could be either a maximum value or a list of allowed values).
type: raw
state:
description:
- Assert the state of the Policy.
- Use C(present) to create or update an Policy and C(absent) to delete it.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure
- azure_tags
author:
- Zim Kalinowski (@zikalino)
'''
EXAMPLES = '''
- name: Create DevTest Lab Policy
azure_rm_devtestlabpolicy:
resource_group: myResourceGroup
lab_name: myLab
policy_set_name: myPolicySet
name: myPolicy
fact_name: user_owned_lab_vm_count
threshold: 5
'''
RETURN = '''
id:
description:
- The identifier of the resource.
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/microsoft.devtestlab/labs/myLab/policySets/
myPolicySet/policies/myPolicy"
'''
import time
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
from ansible.module_utils.common.dict_transformations import _snake_to_camel
try:
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller
from msrestazure.azure_operation import AzureOperationPoller
from azure.mgmt.devtestlabs import DevTestLabsClient
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
class Actions:
NoAction, Create, Update, Delete = range(4)
class AzureRMDtlPolicy(AzureRMModuleBase):
"""Configuration class for an Azure RM Policy resource"""
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
lab_name=dict(
type='str',
required=True
),
policy_set_name=dict(
type='str',
required=True
),
name=dict(
type='str',
required=True
),
description=dict(
type='str'
),
fact_name=dict(
type='str',
choices=['user_owned_lab_vm_count',
'user_owned_lab_premium_vm_count',
'lab_vm_count',
'lab_premium_vm_count',
'lab_vm_size',
'gallery_image',
'user_owned_lab_vm_count_in_subnet',
'lab_target_cost']
),
threshold=dict(
type='raw'
),
state=dict(
type='str',
default='present',
choices=['present', 'absent']
)
)
self.resource_group = None
self.lab_name = None
self.policy_set_name = None
self.name = None
self.policy = dict()
self.results = dict(changed=False)
self.mgmt_client = None
self.state = None
self.to_do = Actions.NoAction
required_if = [
('state', 'present', ['threshold', 'fact_name'])
]
super(AzureRMDtlPolicy, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True,
supports_tags=True,
required_if=required_if)
def exec_module(self, **kwargs):
"""Main module execution method"""
for key in list(self.module_arg_spec.keys()) + ['tags']:
if hasattr(self, key):
setattr(self, key, kwargs[key])
elif kwargs[key] is not None:
self.policy[key] = kwargs[key]
if self.state == 'present':
self.policy['status'] = 'Enabled'
dict_camelize(self.policy, ['fact_name'], True)
if isinstance(self.policy['threshold'], list):
self.policy['evaluator_type'] = 'AllowedValuesPolicy'
else:
self.policy['evaluator_type'] = 'MaxValuePolicy'
response = None
self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
base_url=self._cloud_environment.endpoints.resource_manager)
resource_group = self.get_resource_group(self.resource_group)
old_response = self.get_policy()
if not old_response:
self.log("Policy instance doesn't exist")
if self.state == 'absent':
self.log("Old instance didn't exist")
else:
self.to_do = Actions.Create
else:
self.log("Policy instance already exists")
if self.state == 'absent':
self.to_do = Actions.Delete
elif self.state == 'present':
if (not default_compare(self.policy, old_response, '', self.results)):
self.to_do = Actions.Update
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
self.log("Need to Create / Update the Policy instance")
if self.check_mode:
self.results['changed'] = True
return self.results
response = self.create_update_policy()
self.results['changed'] = True
self.log("Creation / Update done")
elif self.to_do == Actions.Delete:
self.log("Policy instance deleted")
self.results['changed'] = True
if self.check_mode:
return self.results
self.delete_policy()
            # This currently doesn't work as there is a bug in SDK / Service
if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
response = self.get_poller_result(response)
else:
self.log("Policy instance unchanged")
self.results['changed'] = False
response = old_response
if self.state == 'present':
self.results.update({
'id': response.get('id', None),
'status': response.get('status', None)
})
return self.results
def create_update_policy(self):
'''
Creates or updates Policy with the specified configuration.
:return: deserialized Policy instance state dictionary
'''
self.log("Creating / Updating the Policy instance {0}".format(self.name))
try:
response = self.mgmt_client.policies.create_or_update(resource_group_name=self.resource_group,
lab_name=self.lab_name,
policy_set_name=self.policy_set_name,
name=self.name,
policy=self.policy)
if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
response = self.get_poller_result(response)
except CloudError as exc:
self.log('Error attempting to create the Policy instance.')
self.fail("Error creating the Policy instance: {0}".format(str(exc)))
return response.as_dict()
def delete_policy(self):
'''
Deletes specified Policy instance in the specified subscription and resource group.
:return: True
'''
self.log("Deleting the Policy instance {0}".format(self.name))
try:
response = self.mgmt_client.policies.delete(resource_group_name=self.resource_group,
lab_name=self.lab_name,
policy_set_name=self.policy_set_name,
name=self.name)
except CloudError as e:
self.log('Error attempting to delete the Policy instance.')
self.fail("Error deleting the Policy instance: {0}".format(str(e)))
return True
def get_policy(self):
'''
Gets the properties of the specified Policy.
:return: deserialized Policy instance state dictionary
'''
self.log("Checking if the Policy instance {0} is present".format(self.name))
found = False
try:
response = self.mgmt_client.policies.get(resource_group_name=self.resource_group,
lab_name=self.lab_name,
policy_set_name=self.policy_set_name,
name=self.name)
found = True
self.log("Response : {0}".format(response))
self.log("Policy instance : {0} found".format(response.name))
except CloudError as e:
self.log('Did not find the Policy instance.')
if found is True:
return response.as_dict()
return False
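# Recursively compare the requested configuration (new) against the existing resource (old);
# returns True when they match and records the first differing path in result['compare'].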
def default_compare(new, old, path, result):
if new is None:
return True
elif isinstance(new, dict):
if not isinstance(old, dict):
result['compare'] = 'changed [' + path + '] old dict is null'
return False
for k in new.keys():
if not default_compare(new.get(k), old.get(k, None), path + '/' + k, result):
return False
return True
elif isinstance(new, list):
if not isinstance(old, list) or len(new) != len(old):
result['compare'] = 'changed [' + path + '] length is different or null'
return False
if isinstance(old[0], dict):
key = None
if 'id' in old[0] and 'id' in new[0]:
key = 'id'
elif 'name' in old[0] and 'name' in new[0]:
key = 'name'
else:
key = list(old[0])[0]
new = sorted(new, key=lambda x: x.get(key, None))
old = sorted(old, key=lambda x: x.get(key, None))
else:
new = sorted(new)
old = sorted(old)
for i in range(len(new)):
if not default_compare(new[i], old[i], path + '/*', result):
return False
return True
else:
if path == '/location':
new = new.replace(' ', '').lower()
            old = old.replace(' ', '').lower()
if str(new) == str(old):
return True
else:
result['compare'] = 'changed [' + path + '] ' + str(new) + ' != ' + str(old)
return False
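# Convert the snake_case value found at the given key path in d to CamelCase,
# recursing into lists of dicts along the way.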
def dict_camelize(d, path, camelize_first):
if isinstance(d, list):
for i in range(len(d)):
dict_camelize(d[i], path, camelize_first)
elif isinstance(d, dict):
if len(path) == 1:
old_value = d.get(path[0], None)
if old_value is not None:
d[path[0]] = _snake_to_camel(old_value, camelize_first)
else:
sd = d.get(path[0], None)
if sd is not None:
dict_camelize(sd, path[1:], camelize_first)
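# Replace the value found at the given key path in d using the supplied map,
# leaving it unchanged when no mapping entry exists.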
def dict_map(d, path, map):
if isinstance(d, list):
for i in range(len(d)):
dict_map(d[i], path, map)
elif isinstance(d, dict):
if len(path) == 1:
old_value = d.get(path[0], None)
if old_value is not None:
d[path[0]] = map.get(old_value, old_value)
else:
sd = d.get(path[0], None)
if sd is not None:
dict_map(sd, path[1:], map)
def main():
"""Main execution"""
AzureRMDtlPolicy()
if __name__ == '__main__':
main()
|
shitolepriya/Saloon_erp
|
refs/heads/master
|
erpnext/patches/v5_0/repost_requested_qty.py
|
90
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
from erpnext.stock.stock_balance import update_bin_qty, get_indented_qty
count=0
for item_code, warehouse in frappe.db.sql("""select distinct item_code, warehouse
from `tabMaterial Request Item` where docstatus = 1"""):
try:
count += 1
update_bin_qty(item_code, warehouse, {
"indented_qty": get_indented_qty(item_code, warehouse),
})
if count % 200 == 0:
frappe.db.commit()
except:
frappe.db.rollback()
|
valdecdev/odoo
|
refs/heads/master
|
addons/payment/models/__init__.py
|
45
|
# -*- coding: utf-8 -*-
import payment_acquirer
import res_config
import res_partner
|
maxhutch/pypif
|
refs/heads/master
|
pypif/stats/system/system_stats_wrapper.py
|
2
|
from pypif.stats.common.stats_wrapper import Stats
from pypif.stats.common.stats_wrapper import StatsWrapper
from pypif.stats.common.field_stats import FieldStats
from pypif.stats.common.property_stats_wrapper import PropertyStatsWrapper
class SystemStats(Stats):
"""
Class to store stats of a single system.
"""
def __init__(self, count=None, names=None, chemical_formula=None, properties=None):
"""
Constructor.
:param count: Number of systems of this type.
:param names: Dictionary or :class:`.FieldStats` object with stats of the system names.
:param chemical_formula: Dictionary or :class:`.FieldStats` object with stats of the system chemical formula.
:param properties: Dictionary or :class:`.PropertyStatsWrapper` object with stats of the system properties.
"""
super(SystemStats, self).__init__(count=count)
self.names = self._get_object(FieldStats, names)
self.chemical_formula = self._get_object(FieldStats, chemical_formula)
self.properties = self._get_object(PropertyStatsWrapper, properties)
class SystemStatsWrapper(StatsWrapper):
"""
Class to store stats of systems.
"""
def __init__(self, count=None, common=None):
"""
Constructor.
:param count: Number of systems.
:param common: Dictionary or :class:`.SystemStats` object with the stats of the system.
"""
super(SystemStatsWrapper, self).__init__(count=count)
self.common = self._get_object(SystemStats, common)
|
hasteur/g13bot_tools_new
|
refs/heads/master
|
tests/fixes_tests.py
|
6
|
# -*- coding: utf-8 -*-
"""Tests for fixes module."""
#
# (C) Pywikibot team, 2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
from pywikibot import fixes
from tests import unittest, join_data_path
from tests.aspects import TestCase
class TestFixes(TestCase):
"""Test the fixes module."""
net = False
def setUp(self):
"""Backup the current fixes."""
super(TestFixes, self).setUp()
self._old_fixes = fixes.fixes
def tearDown(self):
"""Recover the current fixes."""
fixes.fixes = self._old_fixes
super(TestFixes, self).tearDown()
def test_overwrite_value(self):
"""Test loading a fix file overwriting the fixes."""
fixes.fixes = {}
old_fixes = fixes.fixes
fixes._load_file(join_data_path('set-fixes.py'))
self.assertIsNot(fixes.fixes, old_fixes)
def test_update_value(self):
"""Test loading a fix file changing the fixes."""
fixes.fixes = {}
old_fixes = fixes.fixes
fixes._load_file(join_data_path('fixes.py'))
self.assertIs(fixes.fixes, old_fixes)
if __name__ == '__main__': # pragma: no cover
try:
unittest.main()
except SystemExit:
pass
|
eran-stratoscale/pyracktest
|
refs/heads/master
|
py/strato/racktest/infra/rackattackallocation.py
|
1
|
from rackattack import clientfactory
from rackattack import api
from strato.racktest.infra import config
import logging
from strato.racktest.infra import concurrently
from strato.racktest.infra import suite
from strato.racktest.infra import rootfslabel
import tempfile
import os
import shutil
import time
from strato.racktest.infra import logbeamfromlocalhost
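# Allocates the requested hosts from a rackattack provider, tracks allocation progress,
# and beams a post mortem pack from the provider if the allocation fails.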
class RackAttackAllocation:
_NO_PROGRESS_TIMEOUT = 2 * 60
def __init__(self, hosts):
self._hosts = hosts
self._overallPercent = 0
self._client = clientfactory.factory()
self._allocation = self._client.allocate(
requirements=self._rackattackRequirements(), allocationInfo=self._rackattackAllocationInfo())
self._allocation.registerProgressCallback(self._progress)
# self._allocation.setForceReleaseCallback()
try:
self._waitForAllocation()
except:
logging.exception("Allocation failed, attempting post mortem")
self._postMortemAllocation()
raise
self._nodes = self._allocation.nodes()
assert suite.runOnEveryHost is None
suite.runOnEveryHost = self.runOnEveryHost
def nodes(self):
return self._nodes
def free(self):
assert suite.runOnEveryHost == self.runOnEveryHost
suite.runOnEveryHost = None
self._allocation.free()
def _rackattackRequirements(self):
result = {}
for name, requirements in self._hosts.iteritems():
rootfs = rootfslabel.RootfsLabel(requirements['rootfs'])
hardwareConstraints = dict(requirements)
del hardwareConstraints['rootfs']
result[name] = api.Requirement(
imageLabel=rootfs.label(), imageHint=rootfs.imageHint(),
hardwareConstraints=hardwareConstraints)
return result
def _rackattackAllocationInfo(self):
nice = 0
nice = max(nice, float(os.environ.get('RACKTEST_MINIMUM_NICE_FOR_RACKATTACK', 0)))
return api.AllocationInfo(user=config.USER, purpose="racktest", nice=nice)
def runOnEveryHost(self, callback, description):
concurrently.run([
dict(callback=callback, args=(name,))
for name in self._nodes])
def _postMortemAllocation(self):
try:
filename, contents = self._allocation.fetchPostMortemPack()
except:
logging.exception("Unable to get post mortem pack from rackattack provider")
return
tempDir = tempfile.mkdtemp()
try:
fullPath = os.path.join(tempDir, filename)
with open(fullPath, 'wb') as f:
f.write(contents)
logbeamfromlocalhost.beam([fullPath])
finally:
shutil.rmtree(tempDir, ignore_errors=True)
logging.info("Beamed post mortem pack into %(filename)s", dict(filename=filename))
def _progress(self, overallPercent, event):
self._overallPercent = overallPercent
def _waitForAllocation(self):
INTERVAL = 5
lastOverallPercent = 0
lastOverallPercentChange = time.time()
while self._allocation.dead() is None:
try:
self._allocation.wait(timeout=INTERVAL)
return
except:
if self._overallPercent != lastOverallPercent:
lastOverallPercent = self._overallPercent
lastOverallPercentChange = time.time()
logging.progress("Allocation %(percent)s%% complete", dict(percent=lastOverallPercent))
if time.time() > lastOverallPercentChange + self._NO_PROGRESS_TIMEOUT:
raise Exception("Allocation progress hanged at %(percent)s%% for %(seconds)s seconds",
dict(percent=lastOverallPercent, seconds=self._NO_PROGRESS_TIMEOUT))
raise Exception(self._allocation.dead())
|
nurmd2/nurmd
|
refs/heads/master
|
addons/sale/res_partner.py
|
46
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.osv import fields,osv
class res_partner(osv.osv):
_inherit = 'res.partner'
def _sale_order_count(self, cr, uid, ids, field_name, arg, context=None):
res = dict(map(lambda x: (x,0), ids))
# The current user may not have access rights for sale orders
try:
for partner in self.browse(cr, uid, ids, context):
res[partner.id] = len(partner.sale_order_ids) + len(partner.mapped('child_ids.sale_order_ids'))
except:
pass
return res
_columns = {
'sale_order_count': fields.function(_sale_order_count, string='# of Sales Order', type='integer'),
'sale_order_ids': fields.one2many('sale.order','partner_id','Sales Order')
}
|
Giftingnation/GN-Oscar-Custom
|
refs/heads/master
|
oscar/apps/dashboard/promotions/app.py
|
4
|
from django.conf.urls import patterns, url
from oscar.core.application import Application
from oscar.apps.dashboard.promotions import views
from oscar.apps.promotions.conf import PROMOTION_CLASSES
class PromotionsDashboardApplication(Application):
name = None
default_permissions = ['is_staff', ]
list_view = views.ListView
page_list = views.PageListView
page_detail = views.PageDetailView
create_redirect_view = views.CreateRedirectView
delete_page_promotion_view = views.DeletePagePromotionView
for klass in PROMOTION_CLASSES:
locals()['create_%s_view' % klass.classname()] = \
getattr(views, 'Create%sView' % klass.__name__)
locals()['update_%s_view' % klass.classname()] = \
getattr(views, 'Update%sView' % klass.__name__)
locals()['delete_%s_view' % klass.classname()] = \
getattr(views, 'Delete%sView' % klass.__name__)
def get_urls(self):
urlpatterns = patterns('',
url(r'^$', self.list_view.as_view(), name='promotion-list'),
url(r'^pages/$', self.page_list.as_view(),
name='promotion-list-by-page'),
url(r'^page/(?P<path>/([\w-]+(/[\w-]+)*/)?)$',
self.page_detail.as_view(), name='promotion-list-by-url'),
url(r'^create/$',
self.create_redirect_view.as_view(),
name='promotion-create-redirect'),
url(r'^page-promotion/(?P<pk>\d+)/$',
self.delete_page_promotion_view.as_view(),
name='pagepromotion-delete'))
for klass in PROMOTION_CLASSES:
code = klass.classname()
urlpatterns += patterns('',
url(r'create/%s/' % code,
getattr(self, 'create_%s_view' % code).as_view(),
name='promotion-create-%s' % code),
url(r'^update/(?P<ptype>%s)/(?P<pk>\d+)/$' % code,
getattr(self, 'update_%s_view' % code).as_view(),
name='promotion-update'),
url(r'^delete/(?P<ptype>%s)/(?P<pk>\d+)/$' % code,
getattr(self, 'delete_%s_view' % code).as_view(),
name='promotion-delete'))
return self.post_process_urls(urlpatterns)
application = PromotionsDashboardApplication()
|
mrpollo/ardupilot
|
refs/heads/master
|
Tools/LogAnalyzer/tests/TestDupeLogData.py
|
273
|
from LogAnalyzer import Test,TestResult
import DataflashLog
class TestDupeLogData(Test):
'''test for duplicated data in log, which has been happening on PX4/Pixhawk'''
def __init__(self):
Test.__init__(self)
self.name = "Dupe Log Data"
def __matchSample(self, sample, sampleStartIndex, logdata):
'''return the line number where a match is found, otherwise return False'''
# ignore if all data in sample is the same value
nSame = 0
for s in sample:
if s[1] == sample[0][1]:
nSame += 1
if nSame == 20:
return False
        # scan the pitch data for another occurrence of this 20-sample sequence
data = logdata.channels["ATT"]["Pitch"].listData
for i in range(sampleStartIndex, len(data)):
#print "Checking against index %d" % i
if i == sampleStartIndex:
continue # skip matching against ourselves
j = 0
while j<20 and (i+j)<len(data) and data[i+j][1] == sample[j][1]:
#print "### Match found, j=%d, data=%f, sample=%f, log data matched to sample at line %d" % (j,data[i+j][1],sample[j][1],data[i+j][0])
j += 1
if j == 20: # all samples match
return data[i][0]
return False
def run(self, logdata, verbose):
self.result = TestResult()
self.result.status = TestResult.StatusType.GOOD
# this could be made more flexible by not hard-coding to use ATT data, could make it dynamic based on whatever is available as long as it is highly variable
if "ATT" not in logdata.channels:
self.result.status = TestResult.StatusType.UNKNOWN
self.result.statusMessage = "No ATT log data"
return
# pick 10 sample points within the range of ATT data we have
sampleStartIndices = []
attStartIndex = 0
attEndIndex = len(logdata.channels["ATT"]["Pitch"].listData)-1
step = attEndIndex / 11
for i in range(step,attEndIndex-step,step):
sampleStartIndices.append(i)
#print "Dupe data sample point index %d at line %d" % (i, logdata.channels["ATT"]["Pitch"].listData[i][0])
# get 20 datapoints of pitch from each sample location and check for a match elsewhere
sampleIndex = 0
for i in range(sampleStartIndices[0], len(logdata.channels["ATT"]["Pitch"].listData)):
if i == sampleStartIndices[sampleIndex]:
#print "Checking sample %d" % i
sample = logdata.channels["ATT"]["Pitch"].listData[i:i+20]
matchedLine = self.__matchSample(sample, i, logdata)
if matchedLine:
#print "Data from line %d found duplicated at line %d" % (sample[0][0],matchedLine)
self.result.status = TestResult.StatusType.FAIL
self.result.statusMessage = "Duplicate data chunks found in log (%d and %d)" % (sample[0][0],matchedLine)
return
sampleIndex += 1
if sampleIndex >= len(sampleStartIndices):
break
|
mrakitin/sirepo
|
refs/heads/master
|
tests/uri_router_test.py
|
3
|
# -*- coding: utf-8 -*-
u"""Test sirepo.uri_router
:copyright: Copyright (c) 2017 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
import pytest
pytest.importorskip('srwl_bl')
def test_not_found():
from pykern.pkdebug import pkdp
from pykern.pkunit import pkeq
from sirepo import srunit
fc = srunit.flask_client()
for uri in ('/some random uri', '/srw/wrong-param', '/export-archive'):
resp = fc.get(uri)
pkeq(404, resp.status_code)
def test_uri_for_api():
from sirepo import srunit
def t():
from pykern.pkdebug import pkdp
from pykern.pkunit import pkeq, pkexcept, pkre, pkeq
from sirepo import uri_router
import re
fc = srunit.flask_client()
uri = uri_router.uri_for_api('homePage', params={'path_info': None})
pkre('http://[^/]+/en$', uri)
uri = uri_router.uri_for_api(
'homePage',
params={'path_info': 'terms.html'},
external=False,
)
pkeq('/en/terms.html', uri)
with pkexcept(KeyError):
uri_router.uri_for_api('notAnApi')
with pkexcept('missing parameter'):
uri_router.uri_for_api('exportArchive', {'simulation_type': 'srw'})
srunit.test_in_request(t)
|
moijes12/oh-mainline
|
refs/heads/master
|
vendor/packages/twisted/twisted/protocols/ident.py
|
20
|
# -*- test-case-name: twisted.test.test_ident -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Ident protocol implementation.
@author: Jean-Paul Calderone
"""
from __future__ import generators
import struct
from twisted.internet import defer
from twisted.protocols import basic
from twisted.python import log, failure
_MIN_PORT = 1
_MAX_PORT = 2 ** 16 - 1
class IdentError(Exception):
"""
Can't determine connection owner; reason unknown.
"""
identDescription = 'UNKNOWN-ERROR'
def __str__(self):
return self.identDescription
class NoUser(IdentError):
"""
The connection specified by the port pair is not currently in use or
currently not owned by an identifiable entity.
"""
identDescription = 'NO-USER'
class InvalidPort(IdentError):
"""
Either the local or foreign port was improperly specified. This should
be returned if either or both of the port ids were out of range (TCP
port numbers are from 1-65535), negative integers, reals or in any
fashion not recognized as a non-negative integer.
"""
identDescription = 'INVALID-PORT'
class HiddenUser(IdentError):
"""
The server was able to identify the user of this port, but the
information was not returned at the request of the user.
"""
identDescription = 'HIDDEN-USER'
class IdentServer(basic.LineOnlyReceiver):
"""
The Identification Protocol (a.k.a., "ident", a.k.a., "the Ident
Protocol") provides a means to determine the identity of a user of a
particular TCP connection. Given a TCP port number pair, it returns a
character string which identifies the owner of that connection on the
server's system.
Server authors should subclass this class and override the lookup method.
The default implementation returns an UNKNOWN-ERROR response for every
query.
"""
def lineReceived(self, line):
parts = line.split(',')
if len(parts) != 2:
self.invalidQuery()
else:
try:
portOnServer, portOnClient = map(int, parts)
except ValueError:
self.invalidQuery()
else:
if _MIN_PORT <= portOnServer <= _MAX_PORT and _MIN_PORT <= portOnClient <= _MAX_PORT:
self.validQuery(portOnServer, portOnClient)
else:
self._ebLookup(failure.Failure(InvalidPort()), portOnServer, portOnClient)
def invalidQuery(self):
self.transport.loseConnection()
def validQuery(self, portOnServer, portOnClient):
"""
Called when a valid query is received to look up and deliver the
response.
@param portOnServer: The server port from the query.
@param portOnClient: The client port from the query.
"""
serverAddr = self.transport.getHost().host, portOnServer
clientAddr = self.transport.getPeer().host, portOnClient
defer.maybeDeferred(self.lookup, serverAddr, clientAddr
).addCallback(self._cbLookup, portOnServer, portOnClient
).addErrback(self._ebLookup, portOnServer, portOnClient
)
def _cbLookup(self, (sysName, userId), sport, cport):
self.sendLine('%d, %d : USERID : %s : %s' % (sport, cport, sysName, userId))
def _ebLookup(self, failure, sport, cport):
if failure.check(IdentError):
self.sendLine('%d, %d : ERROR : %s' % (sport, cport, failure.value))
else:
log.err(failure)
self.sendLine('%d, %d : ERROR : %s' % (sport, cport, IdentError(failure.value)))
def lookup(self, serverAddress, clientAddress):
"""Lookup user information about the specified address pair.
Return value should be a two-tuple of system name and username.
Acceptable values for the system name may be found online at::
U{http://www.iana.org/assignments/operating-system-names}
This method may also raise any IdentError subclass (or IdentError
itself) to indicate user information will not be provided for the
given query.
A Deferred may also be returned.
@param serverAddress: A two-tuple representing the server endpoint
of the address being queried. The first element is a string holding
a dotted-quad IP address. The second element is an integer
representing the port.
@param clientAddress: Like L{serverAddress}, but represents the
client endpoint of the address being queried.
"""
raise IdentError()
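# A minimal sketch of a concrete server, assuming a static mapping of address pairs
# to users (the names StaticIdentServer and _USERS below are illustrative only):
#
#     class StaticIdentServer(IdentServer):
#         _USERS = {}  # {(serverAddress, clientAddress): (systemName, userId)}
#
#         def lookup(self, serverAddress, clientAddress):
#             try:
#                 return self._USERS[(serverAddress, clientAddress)]
#             except KeyError:
#                 raise NoUser()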
class ProcServerMixin:
"""Implements lookup() to grab entries for responses from /proc/net/tcp
"""
SYSTEM_NAME = 'LINUX'
try:
from pwd import getpwuid
def getUsername(self, uid, getpwuid=getpwuid):
return getpwuid(uid)[0]
del getpwuid
except ImportError:
def getUsername(self, uid):
raise IdentError()
def entries(self):
f = file('/proc/net/tcp')
f.readline()
for L in f:
yield L.strip()
def dottedQuadFromHexString(self, hexstr):
return '.'.join(map(str, struct.unpack('4B', struct.pack('=L', int(hexstr, 16)))))
def unpackAddress(self, packed):
addr, port = packed.split(':')
addr = self.dottedQuadFromHexString(addr)
port = int(port, 16)
return addr, port
def parseLine(self, line):
parts = line.strip().split()
localAddr, localPort = self.unpackAddress(parts[1])
remoteAddr, remotePort = self.unpackAddress(parts[2])
uid = int(parts[7])
return (localAddr, localPort), (remoteAddr, remotePort), uid
def lookup(self, serverAddress, clientAddress):
for ent in self.entries():
localAddr, remoteAddr, uid = self.parseLine(ent)
if remoteAddr == clientAddress and localAddr[1] == serverAddress[1]:
return (self.SYSTEM_NAME, self.getUsername(uid))
raise NoUser()
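# ProcServerMixin only supplies lookup(); it is typically mixed into IdentServer to
# serve the protocol, e.g. (illustrative name):
#
#     class ProcIdentServer(ProcServerMixin, IdentServer):
#         pass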
class IdentClient(basic.LineOnlyReceiver):
errorTypes = (IdentError, NoUser, InvalidPort, HiddenUser)
def __init__(self):
self.queries = []
def lookup(self, portOnServer, portOnClient):
"""Lookup user information about the specified address pair.
"""
self.queries.append((defer.Deferred(), portOnServer, portOnClient))
if len(self.queries) > 1:
return self.queries[-1][0]
self.sendLine('%d, %d' % (portOnServer, portOnClient))
return self.queries[-1][0]
def lineReceived(self, line):
if not self.queries:
log.msg("Unexpected server response: %r" % (line,))
else:
d, _, _ = self.queries.pop(0)
self.parseResponse(d, line)
if self.queries:
self.sendLine('%d, %d' % (self.queries[0][1], self.queries[0][2]))
def connectionLost(self, reason):
for q in self.queries:
q[0].errback(IdentError(reason))
self.queries = []
def parseResponse(self, deferred, line):
parts = line.split(':', 2)
if len(parts) != 3:
deferred.errback(IdentError(line))
else:
ports, type, addInfo = map(str.strip, parts)
if type == 'ERROR':
for et in self.errorTypes:
if et.identDescription == addInfo:
deferred.errback(et(line))
return
deferred.errback(IdentError(line))
else:
deferred.callback((type, addInfo))
__all__ = ['IdentError', 'NoUser', 'InvalidPort', 'HiddenUser',
'IdentServer', 'IdentClient',
'ProcServerMixin']
|
chienlieu2017/it_management
|
refs/heads/master
|
odoo/addons/l10n_ar/__manifest__.py
|
25
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Copyright (c) 2011 Cubic ERP - Teradata SAC. (http://cubicerp.com)
{
'name': 'Argentina - Accounting',
'version': '2.0',
'description': """
Argentinian accounting chart and tax localization.
==================================================
Plan contable argentino e impuestos de acuerdo a disposiciones vigentes
""",
'author': ['Cubic ERP'],
'website': 'http://cubicERP.com',
'category': 'Localization',
'depends': ['base', 'account'],
'data':[
'data/l10n_ar_chart_data.xml',
'data/account_tax_data.xml',
'data/account_chart_template_data.yml',
],
}
|
leeseuljeong/leeseulstack_neutron
|
refs/heads/master
|
neutron/tests/unit/ml2/db/test_ml2_dvr_db.py
|
14
|
# Copyright (c) 2014 OpenStack Foundation, all rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from sqlalchemy.orm import query
from neutron import context
from neutron.db import l3_db
from neutron.db import models_v2
from neutron.extensions import portbindings
from neutron.plugins.ml2 import db as ml2_db
from neutron.plugins.ml2 import models as ml2_models
from neutron.tests.unit import testlib_api
class Ml2DBTestCase(testlib_api.SqlTestCase):
def setUp(self):
super(Ml2DBTestCase, self).setUp()
self.ctx = context.get_admin_context()
def _setup_neutron_network(self, network_id, port_ids):
with self.ctx.session.begin(subtransactions=True):
self.ctx.session.add(models_v2.Network(id=network_id))
ports = []
for port_id in port_ids:
port = models_v2.Port(id=port_id,
network_id=network_id,
mac_address='foo_mac_address',
admin_state_up=True,
status='ACTIVE',
device_id='',
device_owner='')
self.ctx.session.add(port)
ports.append(port)
return ports
def _setup_neutron_router(self):
with self.ctx.session.begin(subtransactions=True):
router = l3_db.Router()
self.ctx.session.add(router)
return router
def _setup_dvr_binding(self, network_id, port_id, router_id, host_id):
with self.ctx.session.begin(subtransactions=True):
record = ml2_models.DVRPortBinding(
port_id=port_id,
host=host_id,
router_id=router_id,
vif_type=portbindings.VIF_TYPE_UNBOUND,
vnic_type=portbindings.VNIC_NORMAL,
cap_port_filter=False,
status='DOWN')
self.ctx.session.add(record)
return record
def test_ensure_dvr_port_binding_deals_with_db_duplicate(self):
network_id = 'foo_network_id'
port_id = 'foo_port_id'
router_id = 'foo_router_id'
host_id = 'foo_host_id'
self._setup_neutron_network(network_id, [port_id])
self._setup_dvr_binding(network_id, port_id, router_id, host_id)
with mock.patch.object(query.Query, 'first') as query_first:
query_first.return_value = []
with mock.patch.object(ml2_db.LOG, 'debug') as log_trace:
binding = ml2_db.ensure_dvr_port_binding(
self.ctx.session, port_id, host_id, router_id)
self.assertTrue(query_first.called)
self.assertTrue(log_trace.called)
self.assertEqual(port_id, binding.port_id)
def test_ensure_dvr_port_binding(self):
network_id = 'foo_network_id'
port_id = 'foo_port_id'
self._setup_neutron_network(network_id, [port_id])
router = self._setup_neutron_router()
ml2_db.ensure_dvr_port_binding(
self.ctx.session, port_id, 'foo_host', router.id)
expected = (self.ctx.session.query(ml2_models.DVRPortBinding).
filter_by(port_id=port_id).one())
self.assertEqual(expected.port_id, port_id)
def test_ensure_dvr_port_binding_multiple_bindings(self):
network_id = 'foo_network_id'
port_id = 'foo_port_id'
self._setup_neutron_network(network_id, [port_id])
router = self._setup_neutron_router()
ml2_db.ensure_dvr_port_binding(
self.ctx.session, port_id, 'foo_host_1', router.id)
ml2_db.ensure_dvr_port_binding(
self.ctx.session, port_id, 'foo_host_2', router.id)
bindings = (self.ctx.session.query(ml2_models.DVRPortBinding).
filter_by(port_id=port_id).all())
self.assertEqual(2, len(bindings))
def test_delete_dvr_port_binding(self):
network_id = 'foo_network_id'
port_id = 'foo_port_id'
self._setup_neutron_network(network_id, [port_id])
router = self._setup_neutron_router()
binding = self._setup_dvr_binding(
network_id, port_id, router.id, 'foo_host_id')
ml2_db.delete_dvr_port_binding(
self.ctx.session, port_id, 'foo_host_id')
count = (self.ctx.session.query(ml2_models.DVRPortBinding).
filter_by(port_id=binding.port_id).count())
self.assertFalse(count)
def test_delete_dvr_port_binding_not_found(self):
ml2_db.delete_dvr_port_binding(
self.ctx.session, 'foo_port_id', 'foo_host')
def test_delete_dvr_port_binding_if_stale(self):
network_id = 'foo_network_id'
port_id = 'foo_port_id'
self._setup_neutron_network(network_id, [port_id])
binding = self._setup_dvr_binding(
network_id, port_id, None, 'foo_host_id')
ml2_db.delete_dvr_port_binding_if_stale(self.ctx.session, binding)
count = (self.ctx.session.query(ml2_models.DVRPortBinding).
filter_by(port_id=binding.port_id).count())
self.assertFalse(count)
def test_get_dvr_port_binding_by_host_not_found(self):
port = ml2_db.get_dvr_port_binding_by_host(
self.ctx.session, 'foo_port_id', 'foo_host_id')
self.assertIsNone(port)
def test_get_dvr_port_bindings_not_found(self):
port = ml2_db.get_dvr_port_bindings(self.ctx.session, 'foo_port_id')
self.assertFalse(len(port))
def test_get_dvr_port_bindings(self):
network_id = 'foo_network_id'
port_id_1 = 'foo_port_id_1'
port_id_2 = 'foo_port_id_2'
self._setup_neutron_network(network_id, [port_id_1, port_id_2])
router = self._setup_neutron_router()
self._setup_dvr_binding(
network_id, port_id_1, router.id, 'foo_host_id_1')
self._setup_dvr_binding(
network_id, port_id_1, router.id, 'foo_host_id_2')
ports = ml2_db.get_dvr_port_bindings(self.ctx.session, 'foo_port_id')
self.assertEqual(2, len(ports))
|
aetilley/scikit-learn
|
refs/heads/master
|
sklearn/utils/estimator_checks.py
|
41
|
from __future__ import print_function
import types
import warnings
import sys
import traceback
import inspect
import pickle
from copy import deepcopy
import numpy as np
from scipy import sparse
import struct
from sklearn.externals.six.moves import zip
from sklearn.externals.joblib import hash, Memory
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.base import (clone, ClassifierMixin, RegressorMixin,
TransformerMixin, ClusterMixin, BaseEstimator)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.lda import LDA
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.pipeline import make_pipeline
from sklearn.utils.validation import DataConversionWarning
from sklearn.cross_validation import train_test_split
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
BOSTON = None
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
MULTI_OUTPUT = ['CCA', 'DecisionTreeRegressor', 'ElasticNet',
'ExtraTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcess',
'KNeighborsRegressor', 'KernelRidge', 'Lars', 'Lasso',
'LassoLars', 'LinearRegression', 'MultiTaskElasticNet',
'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV',
'OrthogonalMatchingPursuit', 'PLSCanonical', 'PLSRegression',
'RANSACRegressor', 'RadiusNeighborsRegressor',
'RandomForestRegressor', 'Ridge', 'RidgeCV']
def _yield_non_meta_checks(name, Estimator):
yield check_estimators_dtypes
yield check_fit_score_takes_y
yield check_dtype_object
yield check_estimators_fit_returns_self
# Check that all estimator yield informative messages when
# trained on empty datasets
yield check_estimators_empty_data_messages
if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']:
# SpectralEmbedding is non-deterministic,
# see issue #4236
# cross-decomposition's "transform" returns X and Y
yield check_pipeline_consistency
if name not in ['Imputer']:
# Test that all estimators check their input for NaN's and infs
yield check_estimators_nan_inf
if name not in ['GaussianProcess']:
# FIXME!
# in particular GaussianProcess!
yield check_estimators_overwrite_params
if hasattr(Estimator, 'sparsify'):
yield check_sparsify_coefficients
yield check_estimator_sparse_data
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_estimators_pickle
def _yield_classifier_checks(name, Classifier):
    # test classifiers can handle non-array data
yield check_classifier_data_not_an_array
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label
yield check_classifiers_classes
yield check_estimators_partial_fit_n_features
# basic consistency testing
yield check_classifiers_train
if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"]
# TODO some complication with -1 label
and name not in ["DecisionTreeClassifier",
"ExtraTreeClassifier"]):
# We don't raise a warning in these classifiers, as
# the column y interface is used by the forests.
yield check_supervised_y_2d
# test if NotFittedError is raised
yield check_estimators_unfitted
if 'class_weight' in Classifier().get_params().keys():
yield check_class_weight_classifiers
def _yield_regressor_checks(name, Regressor):
# TODO: test with intercept
# TODO: test with multiple responses
# basic testing
yield check_regressors_train
yield check_regressor_data_not_an_array
yield check_estimators_partial_fit_n_features
yield check_regressors_no_decision_function
yield check_supervised_y_2d
if name != 'CCA':
# check that the regressor handles int input
yield check_regressors_int
# Test if NotFittedError is raised
yield check_estimators_unfitted
def _yield_transformer_checks(name, Transformer):
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
yield check_transformer_data_not_an_array
# these don't actually fit the data, so don't raise errors
if name not in ['AdditiveChi2Sampler', 'Binarizer',
'FunctionTransformer', 'Normalizer']:
# basic tests
yield check_transformer_general
yield check_transformers_unfitted
def _yield_clustering_checks(name, Clusterer):
yield check_clusterer_compute_labels_predict
if name not in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering
yield check_estimators_partial_fit_n_features
def _yield_all_checks(name, Estimator):
for check in _yield_non_meta_checks(name, Estimator):
yield check
if issubclass(Estimator, ClassifierMixin):
for check in _yield_classifier_checks(name, Estimator):
yield check
if issubclass(Estimator, RegressorMixin):
for check in _yield_regressor_checks(name, Estimator):
yield check
if issubclass(Estimator, TransformerMixin):
for check in _yield_transformer_checks(name, Estimator):
yield check
if issubclass(Estimator, ClusterMixin):
for check in _yield_clustering_checks(name, Estimator):
yield check
def check_estimator(Estimator):
"""Check if estimator adheres to sklearn conventions.
This estimator will run an extensive test-suite for input validation,
shapes, etc.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
Parameters
----------
Estimator : class
Class to check.
"""
name = Estimator.__class__.__name__
check_parameters_default_constructible(name, Estimator)
for check in _yield_all_checks(name, Estimator):
check(name, Estimator)
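# A minimal usage sketch (the estimator class below is an arbitrary example):
#
#     from sklearn.linear_model import LogisticRegression
#     check_estimator(LogisticRegression)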
def _boston_subset(n_samples=200):
global BOSTON
if BOSTON is None:
boston = load_boston()
X, y = boston.data, boston.target
X, y = shuffle(X, y, random_state=0)
X, y = X[:n_samples], y[:n_samples]
X = StandardScaler().fit_transform(X)
BOSTON = X, y
return BOSTON
def set_fast_parameters(estimator):
# speed up some estimators
params = estimator.get_params()
if ("n_iter" in params
and estimator.__class__.__name__ != "TSNE"):
estimator.set_params(n_iter=5)
if "max_iter" in params:
# NMF
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
# LinearSVR
if estimator.__class__.__name__ == 'LinearSVR':
estimator.set_params(max_iter=20)
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
# especially gradient boosting with default 100
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if estimator.__class__.__name__ == "SelectFdr":
# be tolerant of noisy datasets (not actually speed)
estimator.set_params(alpha=.5)
if estimator.__class__.__name__ == "TheilSenRegressor":
estimator.max_subpopulation = 100
if isinstance(estimator, BaseRandomProjection):
# Due to the jl lemma and often very few samples, the number
# of components of the random matrix projection will be probably
# greater than the number of features.
# So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=1)
if isinstance(estimator, SelectKBest):
# SelectKBest has a default of k=10
        # which is more features than we have in most cases.
estimator.set_params(k=1)
class NotAnArray(object):
" An object that is convertable to an array"
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
def check_estimator_sparse_data(name, Estimator):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
# catch deprecation warnings
with warnings.catch_warnings():
if name in ['Scaler', 'StandardScaler']:
estimator = Estimator(with_mean=False)
else:
estimator = Estimator()
set_fast_parameters(estimator)
# fit and predict
try:
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if hasattr(estimator, 'predict_proba'):
estimator.predict_proba(X)
except TypeError as e:
if 'sparse' not in repr(e):
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: error message state explicitly that "
"sparse input is not supported if this is not the case."
% name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
def check_dtype_object(name, Estimator):
# check that estimators treat dtype object as numeric if possible
rng = np.random.RandomState(0)
X = rng.rand(40, 10).astype(object)
y = (X[:, 0] * 4).astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
with warnings.catch_warnings():
estimator = Estimator()
set_fast_parameters(estimator)
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if hasattr(estimator, "transform"):
estimator.transform(X)
try:
estimator.fit(X, y.astype(object))
except Exception as e:
if "Unknown label type" not in str(e):
raise
X[0, 0] = {'foo': 'bar'}
msg = "argument must be a string or a number"
assert_raises_regex(TypeError, msg, estimator.fit, X, y)
def check_transformer_general(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
X -= X.min()
_check_transformer(name, Transformer, X, y)
_check_transformer(name, Transformer, X.tolist(), y.tolist())
def check_transformer_data_not_an_array(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
this_X = NotAnArray(X)
this_y = NotAnArray(np.asarray(y))
_check_transformer(name, Transformer, this_X, this_y)
def check_transformers_unfitted(name, Transformer):
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
transformer = Transformer()
assert_raises((AttributeError, ValueError), transformer.transform, X)
def _check_transformer(name, Transformer, X, y):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
n_samples, n_features = np.asarray(X).shape
# catch deprecation warnings
with warnings.catch_warnings(record=True):
transformer = Transformer()
set_random_state(transformer)
set_fast_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[y, y]
y_[::2, 1] *= 2
else:
y_ = y
transformer.fit(X, y_)
X_pred = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert_equal(x_pred.shape[0], n_samples)
else:
assert_equal(X_pred.shape[0], n_samples)
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_array_almost_equal(
x_pred, x_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
x_pred, x_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
else:
assert_array_almost_equal(
X_pred, X_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
X_pred, X_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
# raises error on malformed input for transform
if hasattr(X, 'T'):
# If it's not an array, it does not have a 'T' property
assert_raises(ValueError, transformer.transform, X.T)
@ignore_warnings
def check_pipeline_consistency(name, Estimator):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
        # on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
# check that make_pipeline(est) gives same score as est
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min()
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
pipeline = make_pipeline(estimator)
estimator.fit(X, y)
pipeline.fit(X, y)
funcs = ["score", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func_pipeline = getattr(pipeline, func_name)
result = func(X, y)
result_pipe = func_pipeline(X, y)
assert_array_almost_equal(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, Estimator):
# check that all estimators accept an optional y
# in fit and score so they can be used in pipelines
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func(X, y)
args = inspect.getargspec(func).args
assert_true(args[2] in ["y", "Y"])
@ignore_warnings
def check_estimators_dtypes(name, Estimator):
rnd = np.random.RandomState(0)
X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
X_train_64 = X_train_32.astype(np.float64)
X_train_int_64 = X_train_32.astype(np.int64)
X_train_int_32 = X_train_32.astype(np.int32)
y = X_train_int_64[:, 0]
y = multioutput_estimator_convert_y_2d(name, y)
for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
estimator.fit(X_train, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
getattr(estimator, method)(X_train)
def check_estimators_empty_data_messages(name, Estimator):
e = Estimator()
set_fast_parameters(e)
set_random_state(e, 1)
X_zero_samples = np.empty(0).reshape(0, 3)
# The precise message can change depending on whether X or y is
# validated first. Let us test the type of exception only:
assert_raises(ValueError, e.fit, X_zero_samples, [])
X_zero_features = np.empty(0).reshape(3, 0)
# the following y should be accepted by both classifiers and regressors
# and ignored by unsupervised models
y = multioutput_estimator_convert_y_2d(name, np.array([1, 0, 1]))
msg = "0 feature(s) (shape=(3, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, e.fit, X_zero_features, y)
def check_estimators_nan_inf(name, Estimator):
rnd = np.random.RandomState(0)
X_train_finite = rnd.uniform(size=(10, 3))
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = multioutput_estimator_convert_y_2d(name, y)
error_string_fit = "Estimator doesn't check for NaN and inf in fit."
error_string_predict = ("Estimator doesn't check for NaN and inf in"
" predict.")
error_string_transform = ("Estimator doesn't check for NaN and inf in"
" transform.")
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
# try to fit
try:
estimator.fit(X_train, y)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_fit, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_fit, Estimator, exc)
traceback.print_exc(file=sys.stdout)
raise exc
else:
raise AssertionError(error_string_fit, Estimator)
# actually fit
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
try:
estimator.predict(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_predict, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_predict, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_predict, Estimator)
# transform
if hasattr(estimator, "transform"):
try:
estimator.transform(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_transform, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_transform, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_transform, Estimator)
def check_estimators_pickle(name, Estimator):
"""Test that we can pickle all estimators"""
check_methods = ["predict", "transform", "decision_function",
"predict_proba"]
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
# some estimators can't do features less than 0
X -= X.min()
# some estimators only take multioutputs
y = multioutput_estimator_convert_y_2d(name, y)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_random_state(estimator)
set_fast_parameters(estimator)
estimator.fit(X, y)
result = dict()
for method in check_methods:
if hasattr(estimator, method):
result[method] = getattr(estimator, method)(X)
# pickle and unpickle!
pickled_estimator = pickle.dumps(estimator)
unpickled_estimator = pickle.loads(pickled_estimator)
for method in result:
unpickled_result = getattr(unpickled_estimator, method)(X)
assert_array_almost_equal(result[method], unpickled_result)
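# Illustrative sketch (not part of the original check, kept as comments so it
# never runs at import time): the pickle round-trip this check exercises,
# written out by hand for one estimator.  LogisticRegression is only an
# example; any scikit-learn estimator with the standard fit/predict API
# behaves the same way.
#
#   import pickle
#   from sklearn.datasets import make_blobs
#   from sklearn.linear_model import LogisticRegression
#
#   X, y = make_blobs(n_samples=30, random_state=0)
#   clf = LogisticRegression().fit(X, y)
#   clf2 = pickle.loads(pickle.dumps(clf))
#   assert (clf.predict(X) == clf2.predict(X)).all()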
def check_estimators_partial_fit_n_features(name, Alg):
# check if number of features changes between calls to partial_fit.
if not hasattr(Alg, 'partial_fit'):
return
X, y = make_blobs(n_samples=50, random_state=1)
X -= X.min()
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if isinstance(alg, ClassifierMixin):
classes = np.unique(y)
alg.partial_fit(X, y, classes=classes)
else:
alg.partial_fit(X, y)
assert_raises(ValueError, alg.partial_fit, X[:, :-1], y)
def check_clustering(name, Alg):
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if hasattr(alg, "n_clusters"):
alg.set_params(n_clusters=3)
set_random_state(alg)
if name == 'AffinityPropagation':
alg.set_params(preference=-100)
alg.set_params(max_iter=100)
# fit
alg.fit(X)
# with lists
alg.fit(X.tolist())
assert_equal(alg.labels_.shape, (n_samples,))
pred = alg.labels_
assert_greater(adjusted_rand_score(pred, y), 0.4)
# fit another time with ``fit_predict`` and compare results
    if name == 'SpectralClustering':
# there is no way to make Spectral clustering deterministic :(
return
set_random_state(alg)
with warnings.catch_warnings(record=True):
pred2 = alg.fit_predict(X)
assert_array_equal(pred, pred2)
def check_clusterer_compute_labels_predict(name, Clusterer):
"""Check that predict is invariant of compute_labels"""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = Clusterer()
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
if hasattr(clusterer, "random_state"):
clusterer.set_params(random_state=0)
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
def check_classifiers_one_label(name, Classifier):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = ("Classifier can't predict when only one class is "
"present.")
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_fast_parameters(classifier)
# try to fit
try:
classifier.fit(X_train, y)
except ValueError as e:
if 'class' not in repr(e):
print(error_string_fit, Classifier, e)
traceback.print_exc(file=sys.stdout)
raise e
else:
return
except Exception as exc:
print(error_string_fit, Classifier, exc)
traceback.print_exc(file=sys.stdout)
raise exc
# predict
try:
assert_array_equal(classifier.predict(X_test), y)
except Exception as exc:
print(error_string_predict, Classifier, exc)
raise exc
def check_classifiers_train(name, Classifier):
X_m, y_m = make_blobs(n_samples=300, random_state=0)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
for (X, y) in [(X_m, y_m), (X_b, y_b)]:
# catch deprecation warnings
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name in ['BernoulliNB', 'MultinomialNB']:
X -= X.min()
set_fast_parameters(classifier)
set_random_state(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert_true(hasattr(classifier, "classes_"))
y_pred = classifier.predict(X)
assert_equal(y_pred.shape, (n_samples,))
# training set performance
if name not in ['BernoulliNB', 'MultinomialNB']:
assert_greater(accuracy_score(y, y_pred), 0.83)
# raises error on malformed input for predict
assert_raises(ValueError, classifier.predict, X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict
decision = classifier.decision_function(X)
                if n_classes == 2:
assert_equal(decision.shape, (n_samples,))
dec_pred = (decision.ravel() > 0).astype(np.int)
assert_array_equal(dec_pred, y_pred)
                if (n_classes == 3
                        and not isinstance(classifier, BaseLibSVM)):
# 1on1 of LibSVM works differently
assert_equal(decision.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input
assert_raises(ValueError,
classifier.decision_function, X.T)
# raises error on malformed input for decision_function
assert_raises(ValueError,
classifier.decision_function, X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict
y_prob = classifier.predict_proba(X)
assert_equal(y_prob.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_array_almost_equal(np.sum(y_prob, axis=1),
np.ones(n_samples))
# raises error on malformed input
assert_raises(ValueError, classifier.predict_proba, X.T)
# raises error on malformed input for predict_proba
assert_raises(ValueError, classifier.predict_proba, X.T)
def check_estimators_fit_returns_self(name, Estimator):
"""Check if self is returned when calling fit"""
X, y = make_blobs(random_state=0, n_samples=9, n_features=4)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
assert_true(estimator.fit(X, y) is estimator)
@ignore_warnings
def check_estimators_unfitted(name, Estimator):
"""Check that predict raises an exception in an unfitted estimator.
Unfitted estimators should raise either AttributeError or ValueError.
The specific exception type NotFittedError inherits from both and can
therefore be adequately raised for that purpose.
"""
# Common test for Regressors as well as Classifiers
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
est = Estimator()
msg = "fit"
if hasattr(est, 'predict'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict, X)
if hasattr(est, 'decision_function'):
assert_raise_message((AttributeError, ValueError), msg,
est.decision_function, X)
if hasattr(est, 'predict_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_proba, X)
if hasattr(est, 'predict_log_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_log_proba, X)
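# Illustrative sketch (not part of the original check, commented out so it is
# never executed on import): what the docstring above means in practice.
# NotFittedError inherits from both AttributeError and ValueError, so either
# exception type in the clause below catches it.
#
#   from sklearn.linear_model import LogisticRegression
#   clf = LogisticRegression()        # never fitted
#   try:
#       clf.predict([[0.0, 0.0]])
#   except (AttributeError, ValueError) as exc:
#       print("unfitted estimator raised", type(exc).__name__)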
def check_supervised_y_2d(name, Estimator):
if "MultiTask" in name:
# These only work on 2d, so this test makes no sense
return
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
# fit
estimator.fit(X, y)
y_pred = estimator.predict(X)
set_random_state(estimator)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
warnings.simplefilter("ignore", RuntimeWarning)
estimator.fit(X, y[:, np.newaxis])
y_pred_2d = estimator.predict(X)
msg = "expected 1 DataConversionWarning, got: %s" % (
", ".join([str(w_x) for w_x in w]))
if name not in MULTI_OUTPUT:
# check that we warned if we don't support multi-output
assert_greater(len(w), 0, msg)
assert_true("DataConversionWarning('A column-vector y"
" was passed when a 1d array was expected" in msg)
assert_array_almost_equal(y_pred.ravel(), y_pred_2d.ravel())
def check_classifiers_classes(name, Classifier):
X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
y_names = np.array(["one", "two", "three"])[y]
for y_names in [y_names, y_names.astype('O')]:
if name in ["LabelPropagation", "LabelSpreading"]:
# TODO some complication with -1 label
y_ = y
else:
y_ = y_names
classes = np.unique(y_)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name == 'BernoulliNB':
classifier.set_params(binarize=X.mean())
set_fast_parameters(classifier)
set_random_state(classifier)
# fit
classifier.fit(X, y_)
y_pred = classifier.predict(X)
# training set performance
assert_array_equal(np.unique(y_), np.unique(y_pred))
if np.any(classifier.classes_ != classes):
print("Unexpected classes_ attribute for %r: "
"expected %s, got %s" %
(classifier, classes, classifier.classes_))
def check_regressors_int(name, Regressor):
X, _ = _boston_subset()
X = X[:50]
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
regressor_1 = Regressor()
regressor_2 = Regressor()
set_fast_parameters(regressor_1)
set_fast_parameters(regressor_2)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y_.astype(np.float))
pred2 = regressor_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_regressors_train(name, Regressor):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y) # X is already scaled
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
regressor = Regressor()
set_fast_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name == 'PassiveAggressiveRegressor':
regressor.C = 0.01
# raises error on malformed input for fit
assert_raises(ValueError, regressor.fit, X, y[:-1])
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
y_pred = regressor.predict(X)
assert_equal(y_pred.shape, y_.shape)
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
print(regressor)
assert_greater(regressor.score(X, y_), 0.5)
@ignore_warnings
def check_regressors_no_decision_function(name, Regressor):
# checks whether regressors have decision_function or predict_proba
rng = np.random.RandomState(0)
X = rng.normal(size=(10, 4))
y = multioutput_estimator_convert_y_2d(name, X[:, 0])
regressor = Regressor()
set_fast_parameters(regressor)
if hasattr(regressor, "n_components"):
# FIXME CCA, PLS is not robust to rank 1 effects
regressor.n_components = 1
regressor.fit(X, y)
funcs = ["decision_function", "predict_proba", "predict_log_proba"]
for func_name in funcs:
func = getattr(regressor, func_name, None)
if func is None:
# doesn't have function
continue
# has function. Should raise deprecation warning
msg = func_name
assert_warns_message(DeprecationWarning, msg, func, X)
def check_class_weight_classifiers(name, Classifier):
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
raise SkipTest
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
raise SkipTest
for n_centers in [2, 3]:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
with warnings.catch_warnings(record=True):
classifier = Classifier(class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "min_weight_fraction_leaf"):
classifier.set_params(min_weight_fraction_leaf=0.01)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
assert_greater(np.mean(y_pred == 0), 0.89)
def check_class_weight_balanced_classifiers(name, Classifier, X_train, y_train,
X_test, y_test, weights):
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight='balanced')
classifier.fit(X_train, y_train)
y_pred_balanced = classifier.predict(X_test)
assert_greater(f1_score(y_test, y_pred_balanced, average='weighted'),
f1_score(y_test, y_pred, average='weighted'))
def check_class_weight_balanced_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = np.array([1, 1, 1, -1, -1])
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
            # This is a very small dataset; the default n_iter is unlikely to
            # be enough for convergence
classifier.set_params(n_iter=1000)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight='balanced')
coef_balanced = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
n_samples = len(y)
n_classes = float(len(np.unique(y)))
class_weight = {1: n_samples / (np.sum(y == 1) * n_classes),
-1: n_samples / (np.sum(y == -1) * n_classes)}
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_array_almost_equal(coef_balanced, coef_manual)
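# Worked example of the manual reweighting above (illustrative, not part of
# the original check).  For the 5-sample y used here, three samples carry
# label 1 and two carry label -1, so with n_classes == 2:
#
#   weight(+1) = n_samples / (count(+1) * n_classes) = 5 / (3 * 2) = 0.8333...
#   weight(-1) = n_samples / (count(-1) * n_classes) = 5 / (2 * 2) = 1.25
#
# which is what class_weight='balanced' computes internally, hence the
# coefficient comparison above.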
def check_estimators_overwrite_params(name, Estimator):
X, y = make_blobs(random_state=0, n_samples=9)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
with warnings.catch_warnings(record=True):
# catch deprecation warnings
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
    # Make a physical copy of the original estimator parameters before fitting.
params = estimator.get_params()
original_params = deepcopy(params)
# Fit the model
estimator.fit(X, y)
# Compare the state of the model parameters with the original parameters
new_params = estimator.get_params()
for param_name, original_value in original_params.items():
new_value = new_params[param_name]
# We should never change or mutate the internal state of input
# parameters by default. To check this we use the joblib.hash function
# that introspects recursively any subobjects to compute a checksum.
# The only exception to this rule of immutable constructor parameters
# is possible RandomState instance but in this check we explicitly
# fixed the random_state params recursively to be integer seeds.
assert_equal(hash(new_value), hash(original_value),
"Estimator %s should not change or mutate "
" the parameter %s from %s to %s during fit."
% (name, param_name, original_value, new_value))
def check_sparsify_coefficients(name, Estimator):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
[-1, -2], [2, 2], [-2, -2]])
y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
est = Estimator()
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
def check_classifier_data_not_an_array(name, Estimator):
X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_regressor_data_not_an_array(name, Estimator):
X, y = _boston_subset(n_samples=50)
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_estimators_data_not_an_array(name, Estimator, X, y):
if name in CROSS_DECOMPOSITION:
raise SkipTest
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
estimator_1 = Estimator()
estimator_2 = Estimator()
set_fast_parameters(estimator_1)
set_fast_parameters(estimator_2)
set_random_state(estimator_1)
set_random_state(estimator_2)
y_ = NotAnArray(np.asarray(y))
X_ = NotAnArray(np.asarray(X))
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_parameters_default_constructible(name, Estimator):
classifier = LDA()
# test default-constructibility
# get rid of deprecation warnings
with warnings.catch_warnings(record=True):
if name in META_ESTIMATORS:
estimator = Estimator(classifier)
else:
estimator = Estimator()
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert_true(estimator.set_params() is estimator)
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
# this comes from getattr. Gets rid of deprecation decorator.
init = getattr(estimator.__init__, 'deprecated_original',
estimator.__init__)
try:
args, varargs, kws, defaults = inspect.getargspec(init)
except TypeError:
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
if name in META_ESTIMATORS:
# they need a non-default argument
args = args[2:]
else:
args = args[1:]
if args:
# non-empty list
assert_equal(len(args), len(defaults))
else:
return
for arg, default in zip(args, defaults):
assert_in(type(default), [str, int, float, bool, tuple, type(None),
np.float64, types.FunctionType, Memory])
if arg not in params.keys():
# deprecated parameter, not in get_params
assert_true(default is None)
continue
if isinstance(params[arg], np.ndarray):
assert_array_equal(params[arg], default)
else:
assert_equal(params[arg], default)
def multioutput_estimator_convert_y_2d(name, y):
    # Estimators in mono_output_task_error raise ValueError if y is 1-D.
    # Convert y into a 2-D array for those estimators.
if name in (['MultiTaskElasticNetCV', 'MultiTaskLassoCV',
'MultiTaskLasso', 'MultiTaskElasticNet']):
return y[:, np.newaxis]
return y
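# Illustrative example (not part of the original helper, commented out): the
# reshape performed for the MultiTask* estimators versus everything else.
#
#   import numpy as np
#   y = np.array([0, 1, 2])
#   multioutput_estimator_convert_y_2d('MultiTaskLasso', y).shape   # (3, 1)
#   multioutput_estimator_convert_y_2d('Ridge', y).shape            # (3,)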
def check_non_transformer_estimators_n_iter(name, estimator,
multi_output=False):
    # Check that all iterative solvers run for more than one iteration.
iris = load_iris()
X, y_ = iris.data, iris.target
if multi_output:
y_ = y_[:, np.newaxis]
set_random_state(estimator, 0)
if name == 'AffinityPropagation':
estimator.fit(X)
else:
estimator.fit(X, y_)
assert_greater(estimator.n_iter_, 0)
def check_transformer_n_iter(name, estimator):
if name in CROSS_DECOMPOSITION:
# Check using default data
X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min() - 0.1
set_random_state(estimator, 0)
estimator.fit(X, y_)
    # These return an n_iter per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert_greater(iter_, 1)
else:
assert_greater(estimator.n_iter_, 1)
def check_get_params_invariance(name, estimator):
class T(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
pass
def fit(self, X, y):
return self
if name in ('FeatureUnion', 'Pipeline'):
e = estimator([('clf', T())])
    elif name in ('GridSearchCV', 'RandomizedSearchCV'):
return
else:
e = estimator()
shallow_params = e.get_params(deep=False)
deep_params = e.get_params(deep=True)
assert_true(all(item in deep_params.items() for item in
shallow_params.items()))
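# Illustrative sketch (not part of the original check, commented out): the
# invariance asserted above, shown on a concrete Pipeline.  KMeans is used
# purely as an example estimator.
#
#   from sklearn.cluster import KMeans
#   from sklearn.pipeline import Pipeline
#   pipe = Pipeline([('km', KMeans())])
#   shallow = pipe.get_params(deep=False)   # only the Pipeline's own params
#   deep = pipe.get_params(deep=True)       # also 'km', 'km__n_clusters', ...
#   assert all(item in deep.items() for item in shallow.items())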
|
subutai/nupic.research
|
refs/heads/master
|
projects/transformers/experiments/__init__.py
|
2
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2021, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# Automatically import models. This will update Transformer's model mappings so that
# custom models can be loaded via AutoModelForMaskedLM and related auto-constructors.
import models
from .ablations import CONFIGS as ABLATIONS
from .base import CONFIGS as BASE
from .bert_replication import CONFIGS as BERT_REPLICATION
from .bertitos import CONFIGS as BERTITOS
from .distillation import CONFIGS as DISTILLATION
from .eighty_percent_sparse import CONFIGS as EIGHT_PERCENT_SPARSE
from .finetuning import CONFIGS as FINETUNING
from .hpsearch import CONFIGS as HPSEARCH
from .one_cycle_lr import CONFIGS as ONE_CYCLE_LR
from .regressions import CONFIGS as REGRESSIONS
from .rigl_bert import CONFIGS as RIGL_BERT
from .sparse_bert import CONFIGS as SPARSE_BERT
from .sparse_bertitos import CONFIGS as SPARSE_BERTITOS
from .trifecta import CONFIGS as TRIFECTA
from .wide_bert import CONFIGS as WIDE_BERT
"""
Import and collect all experiment configurations into one CONFIGS dict.
"""
__all__ = ["CONFIGS"]
# Collect all configurations
CONFIGS = dict()
CONFIGS.update(ABLATIONS)
CONFIGS.update(BASE)
CONFIGS.update(BERT_REPLICATION)
CONFIGS.update(BERTITOS)
CONFIGS.update(DISTILLATION)
CONFIGS.update(EIGHT_PERCENT_SPARSE)
CONFIGS.update(FINETUNING)
CONFIGS.update(HPSEARCH)
CONFIGS.update(ONE_CYCLE_LR)
CONFIGS.update(REGRESSIONS)
CONFIGS.update(RIGL_BERT)
CONFIGS.update(SPARSE_BERT)
CONFIGS.update(SPARSE_BERTITOS)
CONFIGS.update(TRIFECTA)
CONFIGS.update(WIDE_BERT)
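# Illustrative usage sketch (not part of the original module, commented out):
# how an experiment configuration would typically be looked up from the merged
# dict.  "some_experiment" is a placeholder, not a real config key, and the
# exact import path depends on how the project is run.
#
#   from experiments import CONFIGS
#   print(sorted(CONFIGS.keys()))        # every registered experiment name
#   config = CONFIGS["some_experiment"]  # hypothetical key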
|
kawamon/hue
|
refs/heads/master
|
desktop/core/ext-py/Django-1.11.29/tests/template_tests/filter_tests/test_floatformat.py
|
11
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from decimal import Decimal, localcontext
from django.template.defaultfilters import floatformat
from django.test import SimpleTestCase
from django.utils import six
from django.utils.safestring import mark_safe
from ..utils import setup
class FloatformatTests(SimpleTestCase):
@setup({'floatformat01': '{% autoescape off %}{{ a|floatformat }} {{ b|floatformat }}{% endautoescape %}'})
def test_floatformat01(self):
output = self.engine.render_to_string('floatformat01', {"a": "1.42", "b": mark_safe("1.42")})
self.assertEqual(output, "1.4 1.4")
@setup({'floatformat02': '{{ a|floatformat }} {{ b|floatformat }}'})
def test_floatformat02(self):
output = self.engine.render_to_string('floatformat02', {"a": "1.42", "b": mark_safe("1.42")})
self.assertEqual(output, "1.4 1.4")
class FunctionTests(SimpleTestCase):
def test_inputs(self):
self.assertEqual(floatformat(7.7), '7.7')
self.assertEqual(floatformat(7.0), '7')
self.assertEqual(floatformat(0.7), '0.7')
self.assertEqual(floatformat(0.07), '0.1')
self.assertEqual(floatformat(0.007), '0.0')
self.assertEqual(floatformat(0.0), '0')
self.assertEqual(floatformat(7.7, 3), '7.700')
self.assertEqual(floatformat(6.000000, 3), '6.000')
self.assertEqual(floatformat(6.200000, 3), '6.200')
self.assertEqual(floatformat(6.200000, -3), '6.200')
self.assertEqual(floatformat(13.1031, -3), '13.103')
self.assertEqual(floatformat(11.1197, -2), '11.12')
self.assertEqual(floatformat(11.0000, -2), '11')
self.assertEqual(floatformat(11.000001, -2), '11.00')
self.assertEqual(floatformat(8.2798, 3), '8.280')
self.assertEqual(floatformat(5555.555, 2), '5555.56')
self.assertEqual(floatformat(001.3000, 2), '1.30')
self.assertEqual(floatformat(0.12345, 2), '0.12')
self.assertEqual(floatformat(Decimal('555.555'), 2), '555.56')
self.assertEqual(floatformat(Decimal('09.000')), '9')
self.assertEqual(floatformat('foo'), '')
self.assertEqual(floatformat(13.1031, 'bar'), '13.1031')
self.assertEqual(floatformat(18.125, 2), '18.13')
self.assertEqual(floatformat('foo', 'bar'), '')
self.assertEqual(floatformat('¿Cómo esta usted?'), '')
self.assertEqual(floatformat(None), '')
self.assertEqual(floatformat(-1.323297138040798e+35, 2), '-132329713804079800000000000000000000.00')
self.assertEqual(floatformat(-1.323297138040798e+35, -2), '-132329713804079800000000000000000000')
self.assertEqual(floatformat(1.5e-15, 20), '0.00000000000000150000')
self.assertEqual(floatformat(1.5e-15, -20), '0.00000000000000150000')
self.assertEqual(floatformat(1.00000000000000015, 16), '1.0000000000000002')
def test_zero_values(self):
self.assertEqual(floatformat(0, 6), '0.000000')
self.assertEqual(floatformat(0, 7), '0.0000000')
self.assertEqual(floatformat(0, 10), '0.0000000000')
self.assertEqual(floatformat(0.000000000000000000015, 20), '0.00000000000000000002')
def test_infinity(self):
pos_inf = float(1e30000)
self.assertEqual(floatformat(pos_inf), six.text_type(pos_inf))
neg_inf = float(-1e30000)
self.assertEqual(floatformat(neg_inf), six.text_type(neg_inf))
nan = pos_inf / pos_inf
self.assertEqual(floatformat(nan), six.text_type(nan))
def test_float_dunder_method(self):
class FloatWrapper(object):
def __init__(self, value):
self.value = value
def __float__(self):
return self.value
self.assertEqual(floatformat(FloatWrapper(11.000001), -2), '11.00')
def test_low_decimal_precision(self):
"""
#15789
"""
with localcontext() as ctx:
ctx.prec = 2
self.assertEqual(floatformat(1.2345, 2), '1.23')
self.assertEqual(floatformat(15.2042, -3), '15.204')
self.assertEqual(floatformat(1.2345, '2'), '1.23')
self.assertEqual(floatformat(15.2042, '-3'), '15.204')
self.assertEqual(floatformat(Decimal('1.2345'), 2), '1.23')
self.assertEqual(floatformat(Decimal('15.2042'), -3), '15.204')
|
pquentin/libcloud
|
refs/heads/trunk
|
docs/examples/dns/create_a_record_for_all_rackspace_nodes.py
|
59
|
from pprint import pprint
from libcloud.compute.providers import get_driver as get_compute_driver
from libcloud.compute.types import Provider as ComputeProvider
from libcloud.dns.providers import get_driver as get_dns_driver
from libcloud.dns.types import Provider as DNSProvider
from libcloud.dns.types import RecordType
CREDENTIALS_RACKSPACE = ('username', 'api key')
CREDENTIALS_ZERIGO = ('email', 'api key')
cls = get_compute_driver(ComputeProvider.RACKSPACE)
compute_driver = cls(*CREDENTIALS_RACKSPACE)
cls = get_dns_driver(DNSProvider.ZERIGO)
dns_driver = cls(*CREDENTIALS_ZERIGO)
# Retrieve all the nodes
nodes = compute_driver.list_nodes()
# Create a new zone
zone = dns_driver.create_zone(domain='mydomain2.com')
created = []
for node in nodes:
name = node.name
ip = node.public_ips[0] if node.public_ips else None
if not ip:
continue
print('Creating %s record (data=%s) for node %s' % ('A', ip, name))
record = zone.create_record(name=name, type=RecordType.A, data=ip)
created.append(record)
print('Done, created %d records' % (len(created)))
pprint(created)
|
jgasthaus/antaresia
|
refs/heads/master
|
nlp/utils.py
|
1
|
from collections import defaultdict
import numpy as np
def getNGramCounts(data, N):
"""Count the occurences of all N+1 grams in the
data. Outputs a dictionary mapping histories of length N
to a histogram dictionary."""
counts = defaultdict(dict)
for i in xrange(N,len(data)):
history = tuple(data[i-N:i])
obs = data[i]
if obs in counts[history]:
counts[history][obs] += 1
else:
counts[history][obs] = 1
return counts
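# Illustrative example (not part of the original module, commented out):
# bigram counts (N=1) for a short symbol sequence.
#
#   data = ['a', 'b', 'a', 'b', 'c']
#   getNGramCounts(data, 1)
#   # -> {('a',): {'b': 2}, ('b',): {'a': 1, 'c': 1}}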
def generateContextSymbolPairs(input, n):
for i in xrange(n,len(input)):
yield (input[i-n:i], input[i])
def computeLogLoss(predictor, data, maxContextLength=1000):
N = len(data)
probabilities = np.zeros(N)
for i in xrange(N):
probabilities[i] = predictor(tuple(data[max(0,i-maxContextLength):i]), data[i])
losses = -np.log(probabilities)/np.log(2)
logloss = np.mean(losses)
perplexity = 2**logloss
return logloss, perplexity, probabilities
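# Worked example (illustrative, not part of the original module): if the
# predictor assigns probability 0.5 to every observed symbol, each per-symbol
# loss is -log2(0.5) = 1 bit, so logloss == 1.0 and perplexity == 2 ** 1.0 == 2.0.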
|
alistairlow/tensorflow
|
refs/heads/master
|
tensorflow/python/ops/random_ops.py
|
5
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for generating random numbers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_random_ops
from tensorflow.python.ops import math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_random_ops import *
# pylint: enable=wildcard-import
def _ShapeTensor(shape):
"""Convert to an int32 or int64 tensor, defaulting to int32 if empty."""
if isinstance(shape, (tuple, list)) and not shape:
dtype = dtypes.int32
else:
dtype = None
return ops.convert_to_tensor(shape, dtype=dtype, name="shape")
# pylint: disable=protected-access
def random_normal(shape,
mean=0.0,
stddev=1.0,
dtype=dtypes.float32,
seed=None,
name=None):
"""Outputs random values from a normal distribution.
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
mean: A 0-D Tensor or Python value of type `dtype`. The mean of the normal
distribution.
stddev: A 0-D Tensor or Python value of type `dtype`. The standard deviation
of the normal distribution.
dtype: The type of the output.
seed: A Python integer. Used to create a random seed for the distribution.
See
@{tf.set_random_seed}
for behavior.
name: A name for the operation (optional).
Returns:
A tensor of the specified shape filled with random normal values.
"""
with ops.name_scope(name, "random_normal", [shape, mean, stddev]) as name:
shape_tensor = _ShapeTensor(shape)
mean_tensor = ops.convert_to_tensor(mean, dtype=dtype, name="mean")
stddev_tensor = ops.convert_to_tensor(stddev, dtype=dtype, name="stddev")
seed1, seed2 = random_seed.get_seed(seed)
rnd = gen_random_ops._random_standard_normal(
shape_tensor, dtype, seed=seed1, seed2=seed2)
mul = rnd * stddev_tensor
value = math_ops.add(mul, mean_tensor, name=name)
return value
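# Illustrative usage sketch (not part of the original module, kept as a
# comment): drawing a 2x3 matrix of samples from N(mean=5, stddev=2).
#
#   import tensorflow as tf
#   samples = tf.random_normal([2, 3], mean=5.0, stddev=2.0, seed=42)
#   # `samples` is a float32 Tensor of shape [2, 3]; evaluate it (e.g. inside
#   # a session in graph mode) to obtain concrete values.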
ops.NotDifferentiable("RandomStandardNormal")
def parameterized_truncated_normal(shape,
means=0.0,
stddevs=1.0,
minvals=-2.0,
maxvals=2.0,
dtype=dtypes.float32,
seed=None,
name=None):
"""Outputs random values from a truncated normal distribution.
The generated values follow a normal distribution with specified mean and
standard deviation, except that values whose magnitude is more than 2 standard
deviations from the mean are dropped and re-picked.
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
means: A 0-D Tensor or Python value of type `dtype`. The mean of the
truncated normal distribution.
stddevs: A 0-D Tensor or Python value of type `dtype`. The standard
deviation of the truncated normal distribution.
minvals: A 0-D Tensor or Python value of type `dtype`. The minimum value of
the truncated normal distribution.
maxvals: A 0-D Tensor or Python value of type `dtype`. The maximum value of
the truncated normal distribution.
dtype: The type of the output.
seed: A Python integer. Used to create a random seed for the distribution.
See
@{tf.set_random_seed}
for behavior.
name: A name for the operation (optional).
Returns:
A tensor of the specified shape filled with random truncated normal values.
"""
with ops.name_scope(name, "parameterized_truncated_normal",
[shape, means, stddevs, minvals, maxvals]) as name:
shape_tensor = _ShapeTensor(shape)
means_tensor = ops.convert_to_tensor(means, dtype=dtype, name="means")
stddevs_tensor = ops.convert_to_tensor(stddevs, dtype=dtype, name="stddevs")
minvals_tensor = ops.convert_to_tensor(minvals, dtype=dtype, name="minvals")
maxvals_tensor = ops.convert_to_tensor(maxvals, dtype=dtype, name="maxvals")
seed1, seed2 = random_seed.get_seed(seed)
rnd = gen_random_ops._parameterized_truncated_normal(
shape_tensor,
means_tensor,
stddevs_tensor,
minvals_tensor,
maxvals_tensor,
seed=seed1,
seed2=seed2)
return rnd
def truncated_normal(shape,
mean=0.0,
stddev=1.0,
dtype=dtypes.float32,
seed=None,
name=None):
"""Outputs random values from a truncated normal distribution.
The generated values follow a normal distribution with specified mean and
standard deviation, except that values whose magnitude is more than 2 standard
deviations from the mean are dropped and re-picked.
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
mean: A 0-D Tensor or Python value of type `dtype`. The mean of the
truncated normal distribution.
stddev: A 0-D Tensor or Python value of type `dtype`. The standard deviation
of the truncated normal distribution.
dtype: The type of the output.
seed: A Python integer. Used to create a random seed for the distribution.
See
@{tf.set_random_seed}
for behavior.
name: A name for the operation (optional).
Returns:
A tensor of the specified shape filled with random truncated normal values.
"""
with ops.name_scope(name, "truncated_normal", [shape, mean, stddev]) as name:
shape_tensor = _ShapeTensor(shape)
mean_tensor = ops.convert_to_tensor(mean, dtype=dtype, name="mean")
stddev_tensor = ops.convert_to_tensor(stddev, dtype=dtype, name="stddev")
seed1, seed2 = random_seed.get_seed(seed)
rnd = gen_random_ops._truncated_normal(
shape_tensor, dtype, seed=seed1, seed2=seed2)
mul = rnd * stddev_tensor
value = math_ops.add(mul, mean_tensor, name=name)
return value
ops.NotDifferentiable("ParameterizedTruncatedNormal")
ops.NotDifferentiable("TruncatedNormal")
def random_uniform(shape,
minval=0,
maxval=None,
dtype=dtypes.float32,
seed=None,
name=None):
"""Outputs random values from a uniform distribution.
The generated values follow a uniform distribution in the range
`[minval, maxval)`. The lower bound `minval` is included in the range, while
the upper bound `maxval` is excluded.
For floats, the default range is `[0, 1)`. For ints, at least `maxval` must
be specified explicitly.
In the integer case, the random integers are slightly biased unless
`maxval - minval` is an exact power of two. The bias is small for values of
`maxval - minval` significantly smaller than the range of the output (either
`2**32` or `2**64`).
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
minval: A 0-D Tensor or Python value of type `dtype`. The lower bound on the
range of random values to generate. Defaults to 0.
maxval: A 0-D Tensor or Python value of type `dtype`. The upper bound on
the range of random values to generate. Defaults to 1 if `dtype` is
floating point.
    dtype: The type of the output: `float16`, `float32`, `float64`, `int32`,
or `int64`.
seed: A Python integer. Used to create a random seed for the distribution.
See @{tf.set_random_seed}
for behavior.
name: A name for the operation (optional).
Returns:
A tensor of the specified shape filled with random uniform values.
Raises:
ValueError: If `dtype` is integral and `maxval` is not specified.
"""
dtype = dtypes.as_dtype(dtype)
if dtype not in (dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int32,
dtypes.int64):
raise ValueError("Invalid dtype %r" % dtype)
if maxval is None:
if dtype.is_integer:
raise ValueError("Must specify maxval for integer dtype %r" % dtype)
maxval = 1
with ops.name_scope(name, "random_uniform", [shape, minval, maxval]) as name:
shape = _ShapeTensor(shape)
minval = ops.convert_to_tensor(minval, dtype=dtype, name="min")
maxval = ops.convert_to_tensor(maxval, dtype=dtype, name="max")
seed1, seed2 = random_seed.get_seed(seed)
if dtype.is_integer:
return gen_random_ops._random_uniform_int(
shape, minval, maxval, seed=seed1, seed2=seed2, name=name)
else:
rnd = gen_random_ops._random_uniform(
shape, dtype, seed=seed1, seed2=seed2)
return math_ops.add(rnd * (maxval - minval), minval, name=name)
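# Illustrative usage sketch (not part of the original module, kept as a
# comment): floats default to [0, 1), while the integer path requires an
# explicit maxval and samples from the half-open interval [minval, maxval).
#
#   import tensorflow as tf
#   floats = tf.random_uniform([4])                                     # in [0, 1)
#   ints = tf.random_uniform([4], minval=0, maxval=10, dtype=tf.int32)  # in [0, 10)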
ops.NotDifferentiable("RandomUniform")
def random_shuffle(value, seed=None, name=None):
"""Randomly shuffles a tensor along its first dimension.
The tensor is shuffled along dimension 0, such that each `value[j]` is mapped
to one and only one `output[i]`. For example, a mapping that might occur for a
3x2 tensor is:
```python
[[1, 2], [[5, 6],
[3, 4], ==> [1, 2],
[5, 6]] [3, 4]]
```
Args:
value: A Tensor to be shuffled.
seed: A Python integer. Used to create a random seed for the distribution.
See
@{tf.set_random_seed}
for behavior.
name: A name for the operation (optional).
Returns:
A tensor of same shape and type as `value`, shuffled along its first
dimension.
"""
seed1, seed2 = random_seed.get_seed(seed)
return gen_random_ops._random_shuffle(
value, seed=seed1, seed2=seed2, name=name)
def random_crop(value, size, seed=None, name=None):
"""Randomly crops a tensor to a given size.
Slices a shape `size` portion out of `value` at a uniformly chosen offset.
Requires `value.shape >= size`.
If a dimension should not be cropped, pass the full size of that dimension.
For example, RGB images can be cropped with
`size = [crop_height, crop_width, 3]`.
Args:
value: Input tensor to crop.
size: 1-D tensor with size the rank of `value`.
seed: Python integer. Used to create a random seed. See
@{tf.set_random_seed}
for behavior.
name: A name for this operation (optional).
Returns:
A cropped tensor of the same rank as `value` and shape `size`.
"""
# TODO(shlens): Implement edge case to guarantee output size dimensions.
# If size > value.shape, zero pad the result so that it always has shape
# exactly size.
with ops.name_scope(name, "random_crop", [value, size]) as name:
value = ops.convert_to_tensor(value, name="value")
size = ops.convert_to_tensor(size, dtype=dtypes.int32, name="size")
shape = array_ops.shape(value)
check = control_flow_ops.Assert(
math_ops.reduce_all(shape >= size),
["Need value.shape >= size, got ", shape, size],
summarize=1000)
shape = control_flow_ops.with_dependencies([check], shape)
limit = shape - size + 1
offset = random_uniform(
array_ops.shape(shape),
dtype=size.dtype,
maxval=size.dtype.max,
seed=seed) % limit
return array_ops.slice(value, offset, size, name=name)
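# Illustrative usage sketch (not part of the original module, kept as a
# comment): cropping a random 24x24 patch out of a 32x32 RGB image while
# keeping all 3 channels, as the docstring above suggests.
#
#   import tensorflow as tf
#   image = tf.zeros([32, 32, 3])     # stand-in for a real image tensor
#   patch = tf.random_crop(image, size=[24, 24, 3], seed=0)
#   # `patch` has shape [24, 24, 3]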
def multinomial(logits, num_samples, seed=None, name=None):
"""Draws samples from a multinomial distribution.
Example:
```python
# samples has shape [1, 5], where each value is either 0 or 1 with equal
# probability.
samples = tf.multinomial(tf.log([[10., 10.]]), 5)
```
Args:
logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice
`[i, :]` represents the unnormalized log-probabilities for all classes.
num_samples: 0-D. Number of independent samples to draw for each row slice.
seed: A Python integer. Used to create a random seed for the distribution.
See
@{tf.set_random_seed}
for behavior.
name: Optional name for the operation.
Returns:
The drawn samples of shape `[batch_size, num_samples]`.
"""
with ops.name_scope(name, "multinomial", [logits]):
logits = ops.convert_to_tensor(logits, name="logits")
seed1, seed2 = random_seed.get_seed(seed)
return gen_random_ops.multinomial(
logits, num_samples, seed=seed1, seed2=seed2)
ops.NotDifferentiable("Multinomial")
def random_gamma(shape,
alpha,
beta=None,
dtype=dtypes.float32,
seed=None,
name=None):
"""Draws `shape` samples from each of the given Gamma distribution(s).
`alpha` is the shape parameter describing the distribution(s), and `beta` is
the inverse scale parameter(s).
Example:
samples = tf.random_gamma([10], [0.5, 1.5])
# samples has shape [10, 2], where each slice [:, 0] and [:, 1] represents
# the samples drawn from each distribution
samples = tf.random_gamma([7, 5], [0.5, 1.5])
# samples has shape [7, 5, 2], where each slice [:, :, 0] and [:, :, 1]
# represents the 7x5 samples drawn from each of the two distributions
samples = tf.random_gamma([30], [[1.],[3.],[5.]], beta=[[3., 4.]])
# samples has shape [30, 3, 2], with 30 samples each of 3x2 distributions.
Note: Because internal calculations are done using `float64` and casting has
`floor` semantics, we must manually map zero outcomes to the smallest
possible positive floating-point value, i.e., `np.finfo(dtype).tiny`. This
means that `np.finfo(dtype).tiny` occurs more frequently than it otherwise
should. This bias can only happen for small values of `alpha`, i.e.,
`alpha << 1` or large values of `beta`, i.e., `beta >> 1`.
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output samples
to be drawn per alpha/beta-parameterized distribution.
alpha: A Tensor or Python value or N-D array of type `dtype`. `alpha`
provides the shape parameter(s) describing the gamma distribution(s) to
sample. Must be broadcastable with `beta`.
beta: A Tensor or Python value or N-D array of type `dtype`. Defaults to 1.
`beta` provides the inverse scale parameter(s) of the gamma
distribution(s) to sample. Must be broadcastable with `alpha`.
dtype: The type of alpha, beta, and the output: `float16`, `float32`, or
`float64`.
seed: A Python integer. Used to create a random seed for the distributions.
See
@{tf.set_random_seed}
for behavior.
name: Optional name for the operation.
Returns:
samples: a `Tensor` of shape `tf.concat(shape, tf.shape(alpha + beta))`
with values of type `dtype`.
"""
with ops.name_scope(name, "random_gamma", [shape, alpha, beta]):
shape = ops.convert_to_tensor(shape, name="shape", dtype=dtypes.int32)
alpha = ops.convert_to_tensor(alpha, name="alpha", dtype=dtype)
beta = ops.convert_to_tensor(
beta if beta is not None else 1, name="beta", dtype=dtype)
alpha_broadcast = alpha + array_ops.zeros_like(beta)
seed1, seed2 = random_seed.get_seed(seed)
return math_ops.maximum(
np.finfo(dtype.as_numpy_dtype).tiny,
gen_random_ops._random_gamma(
shape, alpha_broadcast, seed=seed1, seed2=seed2) / beta)
ops.NotDifferentiable("RandomGamma")
def random_poisson(lam, shape, dtype=dtypes.float32, seed=None, name=None):
"""Draws `shape` samples from each of the given Poisson distribution(s).
`lam` is the rate parameter describing the distribution(s).
Example:
samples = tf.random_poisson([0.5, 1.5], [10])
# samples has shape [10, 2], where each slice [:, 0] and [:, 1] represents
# the samples drawn from each distribution
samples = tf.random_poisson([12.2, 3.3], [7, 5])
# samples has shape [7, 5, 2], where each slice [:, :, 0] and [:, :, 1]
# represents the 7x5 samples drawn from each of the two distributions
Args:
lam: A Tensor or Python value or N-D array of type `dtype`.
`lam` provides the rate parameter(s) describing the poisson
distribution(s) to sample.
shape: A 1-D integer Tensor or Python array. The shape of the output samples
to be drawn per "rate"-parameterized distribution.
dtype: The type of the output: `float16`, `float32`, `float64`, `int32` or
`int64`.
seed: A Python integer. Used to create a random seed for the distributions.
See
@{tf.set_random_seed}
for behavior.
name: Optional name for the operation.
Returns:
samples: a `Tensor` of shape `tf.concat(shape, tf.shape(lam))` with
values of type `dtype`.
"""
with ops.name_scope(name, "random_poisson", [lam, shape]):
shape = ops.convert_to_tensor(shape, name="shape", dtype=dtypes.int32)
seed1, seed2 = random_seed.get_seed(seed)
return gen_random_ops.random_poisson_v2(
shape, lam, dtype=dtype, seed=seed1, seed2=seed2)
|
erickt/pygments
|
refs/heads/master
|
scripts/check_sources.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Checker for file headers
~~~~~~~~~~~~~~~~~~~~~~~~
Make sure each Python file has a correct file header
including copyright and license information.
:copyright: Copyright 2006-2009 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys, os, re
import getopt
import cStringIO
from os.path import join, splitext, abspath
checkers = {}
def checker(*suffixes, **kwds):
only_pkg = kwds.pop('only_pkg', False)
def deco(func):
for suffix in suffixes:
checkers.setdefault(suffix, []).append(func)
func.only_pkg = only_pkg
return func
return deco
name_mail_re = r'[\w ]+(<.*?>)?'
copyright_re = re.compile(r'^ :copyright: Copyright 2006-2009 by the Pygments team, '
r'see AUTHORS\.$', re.UNICODE)
copyright_2_re = re.compile(r'^ %s(, %s)*[,.]$' %
(name_mail_re, name_mail_re), re.UNICODE)
coding_re = re.compile(r'coding[:=]\s*([-\w.]+)')
not_ix_re = re.compile(r'\bnot\s+\S+?\s+i[sn]\s\S+')
is_const_re = re.compile(r'if.*?==\s+(None|False|True)\b')
misspellings = ["developement", "adress", "verificate", # ALLOW-MISSPELLING
"informations"] # ALLOW-MISSPELLING
@checker('.py')
def check_syntax(fn, lines):
try:
compile(''.join(lines), fn, "exec")
except SyntaxError, err:
yield 0, "not compilable: %s" % err
@checker('.py')
def check_style_and_encoding(fn, lines):
encoding = 'ascii'
for lno, line in enumerate(lines):
if len(line) > 90:
yield lno+1, "line too long"
m = not_ix_re.search(line)
if m:
yield lno+1, '"' + m.group() + '"'
if is_const_re.search(line):
yield lno+1, 'using == None/True/False'
if lno < 2:
co = coding_re.search(line)
if co:
encoding = co.group(1)
try:
line.decode(encoding)
except UnicodeDecodeError, err:
yield lno+1, "not decodable: %s\n Line: %r" % (err, line)
except LookupError, err:
yield 0, "unknown encoding: %s" % encoding
encoding = 'latin1'
@checker('.py', only_pkg=True)
def check_fileheader(fn, lines):
# line number correction
c = 1
if lines[0:1] == ['#!/usr/bin/env python\n']:
lines = lines[1:]
c = 2
llist = []
docopen = False
for lno, l in enumerate(lines):
llist.append(l)
if lno == 0:
if l == '# -*- coding: rot13 -*-\n':
# special-case pony package
return
elif l != '# -*- coding: utf-8 -*-\n':
yield 1, "missing coding declaration"
elif lno == 1:
if l != '"""\n' and l != 'r"""\n':
yield 2, 'missing docstring begin (""")'
else:
docopen = True
elif docopen:
if l == '"""\n':
# end of docstring
if lno <= 4:
yield lno+c, "missing module name in docstring"
break
if l != "\n" and l[:4] != ' ' and docopen:
yield lno+c, "missing correct docstring indentation"
if lno == 2:
# if not in package, don't check the module name
modname = fn[:-3].replace('/', '.').replace('.__init__', '')
while modname:
if l.lower()[4:-1] == modname:
break
modname = '.'.join(modname.split('.')[1:])
else:
yield 3, "wrong module name in docstring heading"
modnamelen = len(l.strip())
elif lno == 3:
if l.strip() != modnamelen * "~":
yield 4, "wrong module name underline, should be ~~~...~"
else:
yield 0, "missing end and/or start of docstring..."
# check for copyright and license fields
license = llist[-2:-1]
if license != [" :license: BSD, see LICENSE for details.\n"]:
yield 0, "no correct license info"
ci = -3
copyright = [s.decode('utf-8') for s in llist[ci:ci+1]]
while copyright and copyright_2_re.match(copyright[0]):
ci -= 1
copyright = llist[ci:ci+1]
if not copyright or not copyright_re.match(copyright[0]):
yield 0, "no correct copyright info"
@checker('.py', '.html', '.js')
def check_whitespace_and_spelling(fn, lines):
for lno, line in enumerate(lines):
if "\t" in line:
yield lno+1, "OMG TABS!!!1 "
if line[:-1].rstrip(' \t') != line[:-1]:
yield lno+1, "trailing whitespace"
for word in misspellings:
if word in line and 'ALLOW-MISSPELLING' not in line:
yield lno+1, '"%s" used' % word
bad_tags = ('<b>', '<i>', '<u>', '<s>', '<strike>',
            '<center>', '<big>', '<small>', '<font')
@checker('.html')
def check_xhtml(fn, lines):
for lno, line in enumerate(lines):
for bad_tag in bad_tags:
if bad_tag in line:
yield lno+1, "used " + bad_tag
def main(argv):
try:
gopts, args = getopt.getopt(argv[1:], "vi:")
except getopt.GetoptError:
print "Usage: %s [-v] [-i ignorepath]* [path]" % argv[0]
return 2
opts = {}
for opt, val in gopts:
if opt == '-i':
val = abspath(val)
opts.setdefault(opt, []).append(val)
if len(args) == 0:
path = '.'
elif len(args) == 1:
path = args[0]
else:
print "Usage: %s [-v] [-i ignorepath]* [path]" % argv[0]
return 2
verbose = '-v' in opts
num = 0
out = cStringIO.StringIO()
# TODO: replace os.walk run with iteration over output of
# `svn list -R`.
for root, dirs, files in os.walk(path):
if '.svn' in dirs:
dirs.remove('.svn')
if '-i' in opts and abspath(root) in opts['-i']:
del dirs[:]
continue
# XXX: awkward: for the Makefile call: don't check non-package
# files for file headers
in_pocoo_pkg = root.startswith('./pygments')
for fn in files:
fn = join(root, fn)
if fn[:2] == './': fn = fn[2:]
if '-i' in opts and abspath(fn) in opts['-i']:
continue
ext = splitext(fn)[1]
checkerlist = checkers.get(ext, None)
if not checkerlist:
continue
if verbose:
print "Checking %s..." % fn
try:
f = open(fn, 'r')
lines = list(f)
except (IOError, OSError), err:
print "%s: cannot open: %s" % (fn, err)
num += 1
continue
for checker in checkerlist:
if not in_pocoo_pkg and checker.only_pkg:
continue
for lno, msg in checker(fn, lines):
print >>out, "%s:%d: %s" % (fn, lno, msg)
num += 1
if verbose:
print
if num == 0:
print "No errors found."
else:
print out.getvalue().rstrip('\n')
print "%d error%s found." % (num, num > 1 and "s" or "")
return int(num > 0)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
RevelSystems/django
|
refs/heads/master
|
tests/messages_tests/test_cookie.py
|
37
|
import json
from django.contrib.messages import constants
from django.contrib.messages.storage.base import Message
from django.contrib.messages.storage.cookie import (
CookieStorage, MessageDecoder, MessageEncoder,
)
from django.test import TestCase, override_settings
from django.utils.safestring import SafeData, mark_safe
from .base import BaseTests
def set_cookie_data(storage, messages, invalid=False, encode_empty=False):
"""
Sets ``request.COOKIES`` with the encoded data and removes the storage
backend's loaded data cache.
"""
encoded_data = storage._encode(messages, encode_empty=encode_empty)
if invalid:
# Truncate the first character so that the hash is invalid.
encoded_data = encoded_data[1:]
storage.request.COOKIES = {CookieStorage.cookie_name: encoded_data}
if hasattr(storage, '_loaded_data'):
del storage._loaded_data
def stored_cookie_messages_count(storage, response):
"""
Returns an integer containing the number of messages stored.
"""
# Get a list of cookies, excluding ones with a max-age of 0 (because
# they have been marked for deletion).
cookie = response.cookies.get(storage.cookie_name)
if not cookie or cookie['max-age'] == 0:
return 0
data = storage._decode(cookie.value)
if not data:
return 0
if data[-1] == CookieStorage.not_finished:
data.pop()
return len(data)
@override_settings(SESSION_COOKIE_DOMAIN='.example.com', SESSION_COOKIE_SECURE=True, SESSION_COOKIE_HTTPONLY=True)
class CookieTest(BaseTests, TestCase):
storage_class = CookieStorage
def stored_messages_count(self, storage, response):
return stored_cookie_messages_count(storage, response)
def test_get(self):
storage = self.storage_class(self.get_request())
# Set initial data.
example_messages = ['test', 'me']
set_cookie_data(storage, example_messages)
# Test that the message actually contains what we expect.
self.assertEqual(list(storage), example_messages)
    def test_cookie_settings(self):
"""
Ensure that CookieStorage honors SESSION_COOKIE_DOMAIN, SESSION_COOKIE_SECURE and SESSION_COOKIE_HTTPONLY
Refs #15618 and #20972.
"""
# Test before the messages have been consumed
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, 'test')
storage.update(response)
self.assertIn('test', response.cookies['messages'].value)
self.assertEqual(response.cookies['messages']['domain'], '.example.com')
self.assertEqual(response.cookies['messages']['expires'], '')
self.assertEqual(response.cookies['messages']['secure'], True)
self.assertEqual(response.cookies['messages']['httponly'], True)
# Test deletion of the cookie (storing with an empty value) after the messages have been consumed
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, 'test')
for m in storage:
pass # Iterate through the storage to simulate consumption of messages.
storage.update(response)
self.assertEqual(response.cookies['messages'].value, '')
self.assertEqual(response.cookies['messages']['domain'], '.example.com')
self.assertEqual(response.cookies['messages']['expires'], 'Thu, 01-Jan-1970 00:00:00 GMT')
def test_get_bad_cookie(self):
request = self.get_request()
storage = self.storage_class(request)
# Set initial (invalid) data.
example_messages = ['test', 'me']
set_cookie_data(storage, example_messages, invalid=True)
# Test that the message actually contains what we expect.
self.assertEqual(list(storage), [])
def test_max_cookie_length(self):
"""
Tests that, if the data exceeds what is allowed in a cookie, older
messages are removed before saving (and returned by the ``update``
method).
"""
storage = self.get_storage()
response = self.get_response()
# When storing as a cookie, the cookie has constant overhead of approx
# 54 chars, and each message has a constant overhead of about 37 chars
# and a variable overhead of zero in the best case. We aim for a message
# size which will fit 4 messages into the cookie, but not 5.
# See also FallbackTest.test_session_fallback
msg_size = int((CookieStorage.max_cookie_size - 54) / 4.5 - 37)
for i in range(5):
storage.add(constants.INFO, str(i) * msg_size)
unstored_messages = storage.update(response)
cookie_storing = self.stored_messages_count(storage, response)
self.assertEqual(cookie_storing, 4)
self.assertEqual(len(unstored_messages), 1)
self.assertEqual(unstored_messages[0].message, '0' * msg_size)
def test_json_encoder_decoder(self):
"""
Tests that a complex nested data structure containing Message
instances is properly encoded/decoded by the custom JSON
encoder/decoder classes.
"""
messages = [
{
'message': Message(constants.INFO, 'Test message'),
'message_list': [Message(constants.INFO, 'message %s')
for x in range(5)] + [{'another-message':
Message(constants.ERROR, 'error')}],
},
Message(constants.INFO, 'message %s'),
]
encoder = MessageEncoder(separators=(',', ':'))
value = encoder.encode(messages)
decoded_messages = json.loads(value, cls=MessageDecoder)
self.assertEqual(messages, decoded_messages)
def test_safedata(self):
"""
        Tests that a message containing SafeData keeps its safe status when
        retrieved from the message storage.
"""
def encode_decode(data):
message = Message(constants.DEBUG, data)
encoded = storage._encode(message)
decoded = storage._decode(encoded)
return decoded.message
storage = self.get_storage()
self.assertIsInstance(
encode_decode(mark_safe("<b>Hello Django!</b>")), SafeData)
self.assertNotIsInstance(
encode_decode("<b>Hello Django!</b>"), SafeData)
def test_pre_1_5_message_format(self):
"""
For ticket #22426. Tests whether messages that were set in the cookie
before the addition of is_safedata are decoded correctly.
"""
# Encode the messages using the current encoder.
messages = [Message(constants.INFO, 'message %s') for x in range(5)]
encoder = MessageEncoder(separators=(',', ':'))
encoded_messages = encoder.encode(messages)
# Remove the is_safedata flag from the messages in order to imitate
        # the behavior before Django 1.5 (monkey patching).
encoded_messages = json.loads(encoded_messages)
for obj in encoded_messages:
obj.pop(1)
encoded_messages = json.dumps(encoded_messages, separators=(',', ':'))
# Decode the messages in the old format (without is_safedata)
decoded_messages = json.loads(encoded_messages, cls=MessageDecoder)
self.assertEqual(messages, decoded_messages)
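
# Illustrative note (hedged, not part of the original tests): MessageEncoder is
# expected to serialize each Message roughly as a small JSON list of the form
#   ["__json_message", is_safedata, level, message]
# (plus extra_tags when present), which is why test_pre_1_5_message_format pops
# index 1 -- the is_safedata flag -- to imitate cookies written before that flag
# was introduced.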
|
nexusriot/cinder
|
refs/heads/master
|
cinder/volume/drivers/emc/emc_vmax_iscsi.py
|
11
|
# Copyright (c) 2012 - 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
ISCSI Drivers for EMC VMAX arrays based on SMI-S.
"""
import os
from oslo_log import log as logging
import six
from cinder import context
from cinder import exception
from cinder.i18n import _, _LE, _LI
from cinder.volume import driver
from cinder.volume.drivers.emc import emc_vmax_common
LOG = logging.getLogger(__name__)
CINDER_CONF = '/etc/cinder/cinder.conf'
class EMCVMAXISCSIDriver(driver.ISCSIDriver):
"""EMC ISCSI Drivers for VMAX using SMI-S.
Version history:
1.0.0 - Initial driver
1.1.0 - Multiple pools and thick/thin provisioning,
performance enhancement.
2.0.0 - Add driver requirement functions
2.1.0 - Add consistency group functions
2.1.1 - Fixed issue with mismatched config (bug #1442376)
2.1.2 - Clean up failed clones (bug #1440154)
2.1.3 - Fixed a problem with FAST support (bug #1435069)
2.2.0 - Add manage/unmanage
2.2.1 - Support for SE 8.0.3
2.2.2 - Update Consistency Group
2.2.3 - Pool aware scheduler(multi-pool) support
2.2.4 - Create CG from CG snapshot
"""
VERSION = "2.2.4"
def __init__(self, *args, **kwargs):
super(EMCVMAXISCSIDriver, self).__init__(*args, **kwargs)
self.common = (
emc_vmax_common.EMCVMAXCommon('iSCSI',
self.VERSION,
configuration=self.configuration))
def check_for_setup_error(self):
pass
def create_volume(self, volume):
"""Creates a EMC(VMAX/VNX) volume."""
volpath = self.common.create_volume(volume)
model_update = {}
volume['provider_location'] = six.text_type(volpath)
model_update['provider_location'] = volume['provider_location']
return model_update
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
volpath = self.common.create_volume_from_snapshot(volume, snapshot)
model_update = {}
volume['provider_location'] = six.text_type(volpath)
model_update['provider_location'] = volume['provider_location']
return model_update
def create_cloned_volume(self, volume, src_vref):
"""Creates a cloned volume."""
volpath = self.common.create_cloned_volume(volume, src_vref)
model_update = {}
volume['provider_location'] = six.text_type(volpath)
model_update['provider_location'] = volume['provider_location']
return model_update
def delete_volume(self, volume):
"""Deletes an EMC volume."""
self.common.delete_volume(volume)
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
ctxt = context.get_admin_context()
volumename = snapshot['volume_name']
index = volumename.index('-')
volumeid = volumename[index + 1:]
volume = self.db.volume_get(ctxt, volumeid)
volpath = self.common.create_snapshot(snapshot, volume)
model_update = {}
snapshot['provider_location'] = six.text_type(volpath)
model_update['provider_location'] = snapshot['provider_location']
return model_update
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
ctxt = context.get_admin_context()
volumename = snapshot['volume_name']
index = volumename.index('-')
volumeid = volumename[index + 1:]
volume = self.db.volume_get(ctxt, volumeid)
self.common.delete_snapshot(snapshot, volume)
def ensure_export(self, context, volume):
"""Driver entry point to get the export info for an existing volume."""
pass
def create_export(self, context, volume, connector):
"""Driver entry point to get the export info for a new volume."""
pass
def remove_export(self, context, volume):
"""Driver entry point to remove an export for a volume."""
pass
def check_for_export(self, context, volume_id):
"""Make sure volume is exported."""
pass
def initialize_connection(self, volume, connector):
"""Initializes the connection and returns connection info.
The iscsi driver returns a driver_volume_type of 'iscsi'.
        The format of the driver data is defined in smis_get_iscsi_properties.
Example return value::
{
'driver_volume_type': 'iscsi'
'data': {
'target_discovered': True,
'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
                'target_portal': '127.0.0.1:3260',
'volume_id': '12345678-1234-4321-1234-123456789012',
}
}
"""
self.common.initialize_connection(
volume, connector)
iscsi_properties = self.smis_get_iscsi_properties(
volume, connector)
LOG.info(_LI("Leaving initialize_connection: %s"), iscsi_properties)
return {
'driver_volume_type': 'iscsi',
'data': iscsi_properties
}
def smis_do_iscsi_discovery(self, volume):
LOG.info(_LI("ISCSI provider_location not stored, using discovery."))
if not self._check_for_iscsi_ip_address():
LOG.error(_LE(
"You must set your iscsi_ip_address in cinder.conf."))
(out, _err) = self._execute('iscsiadm', '-m', 'discovery',
'-t', 'sendtargets', '-p',
self.configuration.iscsi_ip_address,
run_as_root=True)
LOG.info(_LI(
"smis_do_iscsi_discovery is: %(out)s."),
{'out': out})
targets = []
for target in out.splitlines():
targets.append(target)
return targets
def smis_get_iscsi_properties(self, volume, connector):
"""Gets iscsi configuration.
We ideally get saved information in the volume entity, but fall back
        to discovery if need be. Discovery may be completely removed in the future.
The properties are:
:target_discovered: boolean indicating whether discovery was used
:target_iqn: the IQN of the iSCSI target
:target_portal: the portal of the iSCSI target
:target_lun: the lun of the iSCSI target
:volume_id: the UUID of the volume
:auth_method:, :auth_username:, :auth_password:
the authentication details. Right now, either auth_method is not
present meaning no authentication, or auth_method == `CHAP`
meaning use CHAP with the specified credentials.
"""
properties = {}
location = self.smis_do_iscsi_discovery(volume)
if not location:
raise exception.InvalidVolume(_("Could not find iSCSI export "
" for volume %(volumeName)s.")
% {'volumeName': volume['name']})
LOG.debug("ISCSI Discovery: Found %s", location)
properties['target_discovered'] = True
device_info = self.common.find_device_number(volume)
if device_info is None or device_info['hostlunid'] is None:
exception_message = (_("Cannot find device number for volume "
"%(volumeName)s.")
% {'volumeName': volume['name']})
raise exception.VolumeBackendAPIException(data=exception_message)
device_number = device_info['hostlunid']
LOG.info(_LI(
"location is: %(location)s"), {'location': location})
for loc in location:
results = loc.split(" ")
properties['target_portal'] = results[0].split(",")[0]
properties['target_iqn'] = results[1]
properties['target_lun'] = device_number
properties['volume_id'] = volume['id']
LOG.info(_LI(
"ISCSI properties: %(properties)s"), {'properties': properties})
LOG.info(_LI(
"ISCSI volume is: %(volume)s"), {'volume': volume})
if 'provider_auth' in volume:
auth = volume['provider_auth']
LOG.info(_LI(
"AUTH properties: %(authProps)s"), {'authProps': auth})
if auth is not None:
(auth_method, auth_username, auth_secret) = auth.split()
properties['auth_method'] = auth_method
properties['auth_username'] = auth_username
properties['auth_password'] = auth_secret
LOG.info(_LI("AUTH properties: %s."), properties)
return properties
def terminate_connection(self, volume, connector, **kwargs):
"""Disallow connection from connector."""
self.common.terminate_connection(volume, connector)
def extend_volume(self, volume, new_size):
"""Extend an existing volume."""
self.common.extend_volume(volume, new_size)
def get_volume_stats(self, refresh=False):
"""Get volume stats.
        If 'refresh' is True, update the stats first.
"""
if refresh:
self.update_volume_stats()
return self._stats
def update_volume_stats(self):
"""Retrieve stats info from volume group."""
LOG.debug("Updating volume stats")
data = self.common.update_volume_stats()
data['storage_protocol'] = 'iSCSI'
data['driver_version'] = self.VERSION
self._stats = data
def migrate_volume(self, ctxt, volume, host):
"""Migrate a volume from one Volume Backend to another.
:param ctxt: context
:param volume: the volume object including the volume_type_id
:param host: the host dict holding the relevant target information
:returns: boolean -- Always returns True
:returns: dict -- Empty dict {}
"""
return self.common.migrate_volume(ctxt, volume, host)
def retype(self, ctxt, volume, new_type, diff, host):
"""Migrate volume to another host using retype.
:param ctxt: context
:param volume: the volume object including the volume_type_id
:param new_type: the new volume type.
:param diff: Unused parameter in common.retype
:param host: the host dict holding the relevant target information
:returns: boolean -- True if retype succeeded, False if error
"""
return self.common.retype(ctxt, volume, new_type, diff, host)
def create_consistencygroup(self, context, group):
"""Creates a consistencygroup."""
self.common.create_consistencygroup(context, group)
def delete_consistencygroup(self, context, group):
"""Deletes a consistency group."""
volumes = self.db.volume_get_all_by_group(context, group['id'])
return self.common.delete_consistencygroup(
context, group, volumes)
def create_cgsnapshot(self, context, cgsnapshot):
"""Creates a cgsnapshot."""
return self.common.create_cgsnapshot(context, cgsnapshot, self.db)
def delete_cgsnapshot(self, context, cgsnapshot):
"""Deletes a cgsnapshot."""
return self.common.delete_cgsnapshot(context, cgsnapshot, self.db)
def _check_for_iscsi_ip_address(self):
"""Check to see if iscsi_ip_address is set in cinder.conf
        :returns: boolean -- True if iscsi_ip_address is defined in config.
"""
bExists = os.path.exists(CINDER_CONF)
if bExists:
if 'iscsi_ip_address' in open(CINDER_CONF).read():
return True
return False
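
    # Hedged illustrative note (an assumption, not part of the original driver):
    # the check above simply looks for the option name inside cinder.conf, so a
    # matching configuration stanza could look roughly like:
    #
    #   [DEFAULT]
    #   iscsi_ip_address = 10.10.10.10
    #   volume_driver = cinder.volume.drivers.emc.emc_vmax_iscsi.EMCVMAXISCSIDriver
    #
    # The IP address is a placeholder; the full set of required options depends
    # on the deployment and the EMC VMAX documentation.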
def manage_existing(self, volume, external_ref):
"""Manages an existing VMAX Volume (import to Cinder).
Renames the Volume to match the expected name for the volume.
Also need to consider things like QoS, Emulation, account/tenant.
"""
return self.common.manage_existing(volume, external_ref)
def manage_existing_get_size(self, volume, external_ref):
"""Return size of an existing VMAX volume to manage_existing.
:param self: reference to class
:param volume: the volume object including the volume_type_id
:param external_ref: reference to the existing volume
:returns: size of the volume in GB
"""
return self.common.manage_existing_get_size(volume, external_ref)
def unmanage(self, volume):
"""Export VMAX volume and leave volume intact on the backend array."""
return self.common.unmanage(volume)
def update_consistencygroup(self, context, group,
add_volumes, remove_volumes):
"""Updates LUNs in consistency group."""
return self.common.update_consistencygroup(group, add_volumes,
remove_volumes)
def create_consistencygroup_from_src(self, context, group, volumes,
cgsnapshot=None, snapshots=None,
source_cg=None, source_vols=None):
"""Creates the consistency group from source.
Currently the source can only be a cgsnapshot.
:param context: the context
:param group: the consistency group object to be created
:param volumes: volumes in the consistency group
:param cgsnapshot: the source consistency group snapshot
:param snapshots: snapshots of the source volumes
:param source_cg: the dictionary of a consistency group as source.
:param source_vols: a list of volume dictionaries in the source_cg.
"""
return self.common.create_consistencygroup_from_src(
context, group, volumes, cgsnapshot, snapshots, self.db)
|
cloudbase/nova-virtualbox
|
refs/heads/virtualbox_driver
|
nova/tests/unit/keymgr/fake.py
|
110
|
# Copyright 2011 Justin Santa Barbara
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of a fake key manager."""
from nova.keymgr import mock_key_mgr
def fake_api():
return mock_key_mgr.MockKeyManager()
|
soarpenguin/ansible
|
refs/heads/devel
|
lib/ansible/modules/packaging/os/slackpkg.py
|
7
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Kim Nørgaard
# Written by Kim Nørgaard <jasen@jasen.dk>
# Based on pkgng module written by bleader <bleader@ratonland.org>
# that was based on pkgin module written by Shaun Zinck <shaun.zinck at gmail.com>
# that was based on pacman module written by Afterburn <http://github.com/afterburn>
# that was based on apt module written by Matthew Williams <matthew@flowroute.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: slackpkg
short_description: Package manager for Slackware >= 12.2
description:
- Manage binary packages for Slackware using 'slackpkg' which
is available in versions after 12.2.
version_added: "2.0"
options:
name:
description:
- name of package to install/remove
required: true
state:
description:
            - state of the package; you can use C(installed) as an alias for C(present) and C(removed) as one for C(absent).
choices: [ 'present', 'absent', 'latest' ]
required: false
default: present
update_cache:
description:
- update the package database first
required: false
default: false
choices: [ true, false ]
author: Kim Nørgaard (@KimNorgaard)
requirements: [ "Slackware >= 12.2" ]
'''
EXAMPLES = '''
# Install package foo
- slackpkg:
name: foo
state: present
# Remove packages foo and bar
- slackpkg:
name: foo,bar
state: absent
# Make sure package foo is upgraded to the latest version
- slackpkg:
name: foo
state: latest
'''
def query_package(module, slackpkg_path, name):
import glob
import platform
machine = platform.machine()
packages = glob.glob("/var/log/packages/%s-*-[%s|noarch]*" % (name,
machine))
if len(packages) > 0:
return True
return False
def remove_packages(module, slackpkg_path, packages):
remove_c = 0
# Using a for loop in case of error, we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
if not query_package(module, slackpkg_path, package):
continue
if not module.check_mode:
rc, out, err = module.run_command("%s -default_answer=y -batch=on \
remove %s" % (slackpkg_path,
package))
if not module.check_mode and query_package(module, slackpkg_path,
package):
module.fail_json(msg="failed to remove %s: %s" % (package, out))
remove_c += 1
if remove_c > 0:
module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
module.exit_json(changed=False, msg="package(s) already absent")
def install_packages(module, slackpkg_path, packages):
install_c = 0
for package in packages:
if query_package(module, slackpkg_path, package):
continue
if not module.check_mode:
rc, out, err = module.run_command("%s -default_answer=y -batch=on \
install %s" % (slackpkg_path,
package))
if not module.check_mode and not query_package(module, slackpkg_path,
package):
module.fail_json(msg="failed to install %s: %s" % (package, out),
stderr=err)
install_c += 1
if install_c > 0:
module.exit_json(changed=True, msg="present %s package(s)"
% (install_c))
module.exit_json(changed=False, msg="package(s) already present")
def upgrade_packages(module, slackpkg_path, packages):
install_c = 0
for package in packages:
if not module.check_mode:
rc, out, err = module.run_command("%s -default_answer=y -batch=on \
upgrade %s" % (slackpkg_path,
package))
if not module.check_mode and not query_package(module, slackpkg_path,
package):
module.fail_json(msg="failed to install %s: %s" % (package, out),
stderr=err)
install_c += 1
if install_c > 0:
module.exit_json(changed=True, msg="present %s package(s)"
% (install_c))
module.exit_json(changed=False, msg="package(s) already present")
def update_cache(module, slackpkg_path):
rc, out, err = module.run_command("%s -batch=on update" % (slackpkg_path))
if rc != 0:
module.fail_json(msg="Could not update package cache")
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(default="installed", choices=['installed', 'removed', 'absent', 'present', 'latest']),
name=dict(aliases=["pkg"], required=True, type='list'),
update_cache=dict(default=False, aliases=["update-cache"],
type='bool'),
),
supports_check_mode=True)
slackpkg_path = module.get_bin_path('slackpkg', True)
p = module.params
pkgs = p['name']
if p["update_cache"]:
update_cache(module, slackpkg_path)
if p['state'] == 'latest':
upgrade_packages(module, slackpkg_path, pkgs)
elif p['state'] in ['present', 'installed']:
install_packages(module, slackpkg_path, pkgs)
elif p["state"] in ['removed', 'absent']:
remove_packages(module, slackpkg_path, pkgs)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
ECastleton/Popstop
|
refs/heads/master
|
popsicle/popsicle/wsgi.py
|
1
|
"""
WSGI config for popsicle project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "popsicle.settings")
application = get_wsgi_application()
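
# Illustrative usage note (an assumption, not part of the original file): a WSGI
# server is typically pointed at this module-level callable, for example:
#
#   gunicorn popsicle.wsgi:application
#
# The exact server command and flags depend on the deployment.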
|
numpy/numpy-refactor
|
refs/heads/refactor
|
numpy/doc/performance.py
|
100
|
"""
===========
Performance
===========
Placeholder for Improving Performance documentation.
"""
|
GLMeece/faker
|
refs/heads/master
|
faker/cli.py
|
11
|
# coding=utf-8
from __future__ import unicode_literals
from __future__ import print_function
import os
import sys
import argparse
from faker import Faker, Factory, documentor
from faker import VERSION
from faker.config import AVAILABLE_LOCALES, DEFAULT_LOCALE, META_PROVIDERS_MODULES
if sys.version < '3':
text_type = unicode
binary_type = str
else:
text_type = str
binary_type = bytes
__author__ = 'joke2k'
def print_provider(doc, provider, formatters, excludes=None, output=None):
output = output or sys.stdout
if excludes is None:
excludes = []
print(file=output)
print("### {0}".format(
doc.get_provider_name(provider)), file=output)
print(file=output)
for signature, example in formatters.items():
if signature in excludes:
continue
try:
lines = text_type(example).expandtabs().splitlines()
except UnicodeEncodeError:
raise Exception('error on "{0}" with value "{1}"'.format(
signature, example))
margin = max(30, doc.max_name_len+1)
remains = 150 - margin
separator = '#'
for line in lines:
for i in range(0, (len(line) // remains) + 1):
print("\t{fake:<{margin}}{separator} {example}".format(
fake=signature,
separator=separator,
example=line[i*remains:(i+1)*remains],
margin=margin
), file=output)
signature = separator = ' '
def print_doc(provider_or_field=None,
args=None, lang=DEFAULT_LOCALE, output=None, includes=None):
args = args or []
output = output or sys.stdout
fake = Faker(locale=lang, includes=includes)
from faker.providers import BaseProvider
base_provider_formatters = [f for f in dir(BaseProvider)]
if provider_or_field:
if '.' in provider_or_field:
parts = provider_or_field.split('.')
locale = parts[-2] if parts[-2] in AVAILABLE_LOCALES else lang
fake = Factory.create(locale, providers=[provider_or_field], includes=includes)
doc = documentor.Documentor(fake)
doc.already_generated = base_provider_formatters
print_provider(
doc,
fake.get_providers()[0],
doc.get_provider_formatters(fake.get_providers()[0]),
output=output)
else:
try:
print(fake.format(provider_or_field, *args), end='', file=output)
except AttributeError:
raise ValueError('No faker found for "{0}({1})"'.format(
provider_or_field, args))
else:
doc = documentor.Documentor(fake)
formatters = doc.get_formatters(with_args=True, with_defaults=True)
for provider, fakers in formatters:
print_provider(doc, provider, fakers, output=output)
for language in AVAILABLE_LOCALES:
if language == lang:
continue
print(file=output)
print('## LANGUAGE {0}'.format(language), file=output)
fake = Faker(locale=language)
d = documentor.Documentor(fake)
for p, fs in d.get_formatters(with_args=True, with_defaults=True,
locale=language,
excludes=base_provider_formatters):
print_provider(d, p, fs, output=output)
class Command(object):
def __init__(self, argv=None):
self.argv = argv or sys.argv[:]
self.prog_name = os.path.basename(self.argv[0])
def execute(self):
"""
Given the command-line arguments, this creates a parser appropriate
to that command, and runs it.
"""
# retrieve default language from system environment
default_locale = os.environ.get('LANG', 'en_US').split('.')[0]
if default_locale not in AVAILABLE_LOCALES:
default_locale = DEFAULT_LOCALE
formatter_class = argparse.RawDescriptionHelpFormatter
parser = argparse.ArgumentParser(
prog=self.prog_name,
description='{0} version {1}'.format(self.prog_name, VERSION),
formatter_class=formatter_class)
parser.add_argument("--version", action="version",
version="%(prog)s {0}".format(VERSION))
parser.add_argument('-o', metavar="output",
type=argparse.FileType('w'),
default=sys.stdout,
help="redirect output to a file")
parser.add_argument('-l', '--lang',
choices=AVAILABLE_LOCALES,
default=default_locale)
parser.add_argument('-r', '--repeat',
default=1, type=int)
parser.add_argument('-s', '--sep',
default='\n')
parser.add_argument('-i', '--include', default=META_PROVIDERS_MODULES, nargs='*')
parser.add_argument('fake', action='store', nargs='*')
arguments = parser.parse_args(self.argv[1:])
for i in range(arguments.repeat):
fake = arguments.fake[0] if len(arguments.fake) else None
print_doc(fake,
arguments.fake[1:],
lang=arguments.lang,
output=arguments.o,
includes=arguments.include
)
print(arguments.sep, file=arguments.o)
if not fake:
# repeat not supported for all docs
break
def execute_from_command_line(argv=None):
"""A simple method that runs a Command."""
if sys.stdout.encoding is None:
        print('please set the PYTHONIOENCODING environment variable to UTF-8, '
              'e.g. export PYTHONIOENCODING=UTF-8, when writing to stdout',
              file=sys.stderr)
exit(1)
command = Command(argv)
command.execute()
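
# Illustrative command-line usage (a hedged sketch, not part of the original
# module). Assuming the package installs a console script named "faker", the
# argparse options defined above would allow invocations such as:
#
#   faker --lang en_US address        # one fake address
#   faker -r 3 -s ';' name            # three fake names separated by ';'
#   faker -o providers.txt            # dump documentation for all providers
#
# The script name and file names here are placeholders.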
|
kressi/erpnext
|
refs/heads/develop
|
erpnext/accounts/doctype/cheque_print_template/cheque_print_template.py
|
33
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe import _
class ChequePrintTemplate(Document):
pass
@frappe.whitelist()
def create_or_update_cheque_print_format(template_name):
if not frappe.db.exists("Print Format", template_name):
cheque_print = frappe.new_doc("Print Format")
cheque_print.update({
"doc_type": "Payment Entry",
"standard": "No",
"custom_format": 1,
"print_format_type": "Server",
"name": template_name
})
else:
cheque_print = frappe.get_doc("Print Format", template_name)
doc = frappe.get_doc("Cheque Print Template", template_name)
cheque_print.html = """
<div style="position: relative; top:%(starting_position_from_top_edge)scm">
<div style="width:%(cheque_width)scm;height:%(cheque_height)scm;">
<span style="top: {{ %(acc_pay_dist_from_top_edge)s }}cm; left: {{ %(acc_pay_dist_from_left_edge)s }}cm;
border-bottom: solid 1px;border-top:solid 1px; position: absolute;">
%(message_to_show)s
</span>
<span style="top:%(date_dist_from_top_edge)s cm; left:%(date_dist_from_left_edge)scm;
position: absolute;">
{{ frappe.utils.formatdate(doc.reference_date) or '' }}
</span>
<span style="top:%(acc_no_dist_from_top_edge)scm;left:%(acc_no_dist_from_left_edge)scm;
position: absolute;">
{{ doc.account_no or '' }}
</span>
<span style="top:%(payer_name_from_top_edge)scm;left: %(payer_name_from_left_edge)scm;
position: absolute;">
{{doc.party_name}}
</span>
<span style="top:%(amt_in_words_from_top_edge)scm; left:%(amt_in_words_from_left_edge)scm;
position: absolute; display: block; width: %(amt_in_word_width)scm;
line-height:%(amt_in_words_line_spacing)scm; word-wrap: break-word;">
{{frappe.utils.money_in_words(doc.base_paid_amount or doc.base_received_amount)}}
</span>
<span style="top:%(amt_in_figures_from_top_edge)scm;left: %(amt_in_figures_from_left_edge)scm;
position: absolute;">
{{doc.get_formatted("base_paid_amount") or doc.get_formatted("base_received_amount")}}
</span>
<span style="top:%(signatory_from_top_edge)scm;left: %(signatory_from_left_edge)scm;
position: absolute;">
{{doc.company}}
</span>
</div>
</div>"""%{
"starting_position_from_top_edge": doc.starting_position_from_top_edge \
if doc.cheque_size == "A4" else 0.0,
"cheque_width": doc.cheque_width, "cheque_height": doc.cheque_height,
"acc_pay_dist_from_top_edge": doc.acc_pay_dist_from_top_edge,
"acc_pay_dist_from_left_edge": doc.acc_pay_dist_from_left_edge,
"message_to_show": doc.message_to_show if doc.message_to_show else _("Account Pay Only"),
"date_dist_from_top_edge": doc.date_dist_from_top_edge,
"date_dist_from_left_edge": doc.date_dist_from_left_edge,
"acc_no_dist_from_top_edge": doc.acc_no_dist_from_top_edge,
"acc_no_dist_from_left_edge": doc.acc_no_dist_from_left_edge,
"payer_name_from_top_edge": doc.payer_name_from_top_edge,
"payer_name_from_left_edge": doc.payer_name_from_left_edge,
"amt_in_words_from_top_edge": doc.amt_in_words_from_top_edge,
"amt_in_words_from_left_edge": doc.amt_in_words_from_left_edge,
"amt_in_word_width": doc.amt_in_word_width,
"amt_in_words_line_spacing": doc.amt_in_words_line_spacing,
"amt_in_figures_from_top_edge": doc.amt_in_figures_from_top_edge,
"amt_in_figures_from_left_edge": doc.amt_in_figures_from_left_edge,
"signatory_from_top_edge": doc.signatory_from_top_edge,
"signatory_from_left_edge": doc.signatory_from_left_edge
}
cheque_print.save(ignore_permissions=True)
frappe.db.set_value("Cheque Print Template", template_name, "has_print_format", 1)
return cheque_print
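
# Illustrative server-side usage (a hedged sketch, not part of the original file):
# once a "Cheque Print Template" document exists, the whitelisted function above
# can be called to (re)build the matching Print Format, e.g.
#
#   create_or_update_cheque_print_format("HDFC Cheque")
#
# "HDFC Cheque" is a placeholder template name; client code would reach the same
# function through frappe's RPC layer using its full dotted path.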
|
Whistler092/talk_flask
|
refs/heads/master
|
run.py
|
1
|
from flask import Flask
app = Flask(__name__)
@app.route("/")
def init():
return "<h1>Hola Python Cali!</h1>"
if __name__ == "__main__":
app.run(debug=True)
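
# Illustrative usage note (not part of the original snippet): running this file
# directly (python run.py) starts Flask's development server with the debugger
# enabled, and the greeting is then served at http://127.0.0.1:5000/ by default.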
|
reflectometry/osrefl
|
refs/heads/master
|
examples/sawtooth.py
|
1
|
from numpy import arange, linspace, float64, indices, zeros_like, ones_like, pi, sin, complex128, array, exp, newaxis, cumsum, sum, log10
from GISANS_problem import Shape, rectangle, GISANS_problem
#def rectangle(x0, y0, dx, dy, sld=0.0, sldi=0.0):
# #generate points for a rectangle
# rect = Shape('rectangle')
# rect.points = [[x0,y0], [x0+dx, y0], [x0+dx, y0+dy], [x0, y0+dy]]
# rect.sld = sld
# rect.sldi = sldi
# return rect
def sawtooth(z, dz, n=6, x_length=3000.0, base_width=500.0, height=300.0, sld=0.0, sldi=0.0, sld_front=0.0, sldi_front=0.0):
    if z > height:
        # above the top of the sawtooth there are no shapes: the whole layer is
        # front medium, so return the same 4-tuple shape as the normal path below
        return [], sld_front, sldi_front, dz
width = (z / height) * base_width
front_width = base_width - width
if width == 0:
rects = []
else:
rects = [rectangle(0, base_width*(i+0.5) - width/2.0, x_length, width, sld, sldi) for i in range(n)]
#### below is now taken care of with "matrix" rectangle that surrounds every layer.
# now rectangles for the gaps between the sawtooths...
# if (sld_front !=0.0 and sldi_front != 0.0):
# front_rects = [rectangle(0, 0, x_length, front_width/2.0, sld_front, sldi_front)]
# front_rects.extend([rectangle(0, base_width*(i+0.5)+width/2.0, x_length, front_width, sld_front, sldi_front) for i in range(1,n-1)])
# front_rects.append(rectangle(0, base_width*(n-0.5)+width/2.0, x_length, front_width/2.0, sld_front, sldi_front))
# rects.extend(front_rects)
# now calculate the average SLD (nuclear) for the layer
avg_sld = (width * sld + front_width * sld_front) / base_width
avg_sldi = (width * sldi + front_width * sldi_front) / base_width
return rects, avg_sld, avg_sldi, dz
# rectangles for inplane stripes: have width = 25 nm with
# alternating SLD
def draw_planview(shapes, xview = (0,5000), yview=(0,5000)):
from pylab import plot, figure, draw, Polygon
slds = [shape.sld for shape in shapes]
max_sld = max(slds)
min_sld = min(slds + [0,])
fig = figure()
ax = fig.add_subplot(111)
ax.set_xlim(xview)
ax.set_ylim(yview)
draw()
ps = [Polygon(array(shape.points)) for shape in shapes]
for p in ps:
ax.add_patch(p)
draw()
def draw_sideview(sublayers, yview=(0,5000), zview=(-50,400)):
from pylab import plot, figure, draw, Polygon
dz = [sl[3] for sl in sublayers]
thickness = sum(array(dz))
fig = figure()
ax = fig.add_subplot(111)
ax.set_xlim(yview)
ax.set_ylim(zview)
draw()
z = 0
for sl in sublayers:
for shape in sl[0]:
sp = array(shape.points)
ymax = sp[:,1].max()
ymin = sp[:,1].min()
sideview = array([[ymin, z],[ymax, z],[ymax, z+sl[3]], [ymin, z+sl[3]]])
p = Polygon(sideview)
ax.add_patch(p)
z += sl[3] # dz
draw()
wavelength = 1.24 # x-ray wavelength, Angstroms
Lx = 3000.
Ly = 3000.
front_sld = 0.0 # air
back_sld = pi/(wavelength**2) * 2.0 * 5.0e-6 # substrate
back_sldi = pi/(wavelength**2) * 2.0 * 7.0e-8 # absorption in substrate
delta = [1.0e-6, 3.0e-6] * 6
beta = [1.0e-7, 3.0e-7] * 6
width = [300, 200] * 6 # Angstroms
y0 = cumsum(array(width))
#arange(12.0) * 250
qz = linspace(0.01, 0.41, 501)[newaxis,newaxis,:]
qy = linspace(-0.1, 0.1, 501)[newaxis,:,newaxis]
qx = array([1e-10])[:,newaxis,newaxis]
#qx = ones_like(qy, dtype=complex128) * 1e-8
zs = linspace(0.0, 300.0, 31)
dz = zs[1] - zs[0]
sublayers = [sawtooth(z, dz, sld=back_sld, sldi=back_sldi) for z in zs]
matrix = rectangle(0,0, Lx, Ly, front_sld, 0.0)
g_problem = GISANS_problem(sublayers, matrix, front_sld, 0.0, back_sld, back_sldi, wavelength, qx, qy, qz, Lx, Ly)
|
F5Networks/f5-common-python
|
refs/heads/development
|
devtools/crawler.py
|
1
|
from __future__ import absolute_import
# Copyright 2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from f5.devtools.code_generator import DEVICECONFDIR
import json
import os
from pprint import pprint as pp
from urlparse import urlparse
class OCCrawler(object):
def __init__(self, bigip, OC_path_element):
self.bigip = bigip
self.session = self.bigip._meta_data[u"icr_session"]
self.uri = self.bigip._meta_data['uri'] + OC_path_element
self.configs = [self.session.get(self.uri).json()]
self.build_referenced_uris()
def _get_uri_from_OC_item(self, item):
if u"reference" in item and u"link" in item[u"reference"]:
return item[u"reference"][u"link"]\
.replace("localhost",
self.bigip._meta_data[u"hostname"])
def build_referenced_uris(self):
self.referenced = []
for item in self.configs[0][u"items"]:
self.referenced.append(self._get_uri_from_OC_item(item))
def get_referenced_configs(self):
for uri in self.referenced:
self.configs.append(self.session.get(uri).json())
class ConfigWriter(object):
def __init__(self, config_list, complete_oc_name):
self.oc_name = complete_oc_name
self.oc_basename = self.oc_name.split('/')[-1]
self.configs = config_list
def _get_fname(self, conf):
sl = conf[u"selfLink"]
scheme, netloc, path, params, qargs, frags = urlparse(sl)
ps = path.split('/')
if ps[-1] == self.oc_basename:
return self.oc_basename + '_GET'
else:
return self.oc_basename + '_' + ps[-1] + '_GET'
def dump_configs(self):
for conf in self.configs:
fname = self._get_fname(conf)
if not os.path.exists(os.path.join(DEVICECONFDIR, fname)):
outname = os.path.join(DEVICECONFDIR, fname) + ".json"
with open(outname, 'w') as fh:
json.dump(conf, fh)
def main():
from f5.bigip import BigIP
b = BigIP('10.190.5.7', 'admin', 'admin')
occrawler = OCCrawler(b, 'ltm/persistence')
pp(occrawler.referenced)
occrawler.get_referenced_configs()
pp(occrawler.configs)
config_writer = ConfigWriter(occrawler.configs, u"ltm/persistence")
config_writer.dump_configs()
if __name__ == '__main__':
main()
|
tmpgit/intellij-community
|
refs/heads/master
|
python/testData/resolve/LoopToLowerReassignment.py
|
83
|
def f():
while True:
foo = 1
# <ref>
foo = 2
|
unioslo/cerebrum
|
refs/heads/master
|
Cerebrum/modules/virthome/bofhd_auth.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2009 University of Oslo, Norway
#
# This file is part of Cerebrum.
#
# Cerebrum is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Cerebrum is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cerebrum; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Authentication/permission checking module for VirtHome's bofhd extensions.
This module contains the code necessary to support permission checks for
virthome bofhd operations.
"""
import cereconf
from Cerebrum.Constants import Constants
from Cerebrum.Utils import Factory
from Cerebrum.modules.bofhd import auth
from Cerebrum.modules.bofhd.errors import PermissionDenied
from Cerebrum import Errors
from Cerebrum.Utils import argument_to_sql
class BofhdVirtHomeAuth(auth.BofhdAuth):
"""This class defines a number of permission check/authorisation methods
used by the bofhd framework in VirtHome.
"""
def __init__(self, database):
super(BofhdVirtHomeAuth, self).__init__(database)
# end __init__
def get_permission_holders_on_groups(self, op_set_id, group_id=None, account_id=None):
"""Collect all account-with-permissions-on-group satisfying certain criteria.
The idea is to figure out who has permission set represented by
opset_id on which group, in order to answer questions like 'List all
moderators of this group' or 'which groups do I own?'. The method is
generalised to accept multiple account_ids/group_ids/op_set_ids.
@type op_set_id: int or a non-empty sequence thereof.
FIXME: Should the interface be nicer and allow BofhdAuthOpSet
instances?
FIXME: BofhdAuthRole.list() could be (should be?) fixed to perform
this task.
@type group_id: int or a non-empty sequence thereof.
@type account_id: int or a non-empty sequence thereof
@return:
An iterable over db-rows with entity_ids of the permission
holders. (FIXME: do we want entity_types to go with entity_ids?)
"""
assert not (group_id and account_id), "Cannot specify both"
binds = {"target_type": self.const.auth_target_type_group,
"domain": self.const.group_namespace,
"domain2": self.const.account_namespace,}
where = [argument_to_sql(op_set_id, "ar.op_set_id", binds, int),]
if group_id is not None:
where.append(argument_to_sql(group_id, "aot.entity_id", binds, int))
elif account_id is not None:
where.append(argument_to_sql(account_id, "ar.entity_id", binds, int))
query = ("""
SELECT DISTINCT ar.entity_id as account_id,
aot.entity_id as group_id,
en.entity_name as group_name,
en2.entity_name as account_name,
gi.description
FROM [:table schema=cerebrum name=auth_role] ar
JOIN [:table schema=cerebrum name=auth_op_target] aot
ON ar.op_target_id = aot.op_target_id AND
aot.target_type = :target_type AND""" +
" AND ".join(where) +
"""
JOIN [:table schema=cerebrum name=group_info] gi
ON aot.entity_id = gi.group_id
LEFT OUTER JOIN [:table schema=cerebrum name=entity_name] en
ON en.entity_id = aot.entity_id AND
en.value_domain = :domain
LEFT OUTER JOIN [:table schema=cerebrum name=entity_name] en2
ON en2.entity_id = ar.entity_id AND
en2.value_domain = :domain2
""")
return list(x.dict()
for x in self.query(query, binds,))
    # end get_permission_holders_on_groups
def _get_account(self, account_id):
account = Factory.get("Account")(self._db)
try:
account.find(int(account_id))
return account
except Errors.NotFoundError:
return None
# end _get_account
def _get_group(self, ident):
group = Factory.get("Group")(self._db)
try:
if ((isinstance(ident, str) and ident.isdigit()) or
isinstance(ident, (int, long))):
group.find(int(ident))
else:
group.find_by_name(ident)
return group
except Errors.NotFoundError:
return None
# end _get_group
def is_feideuser(self, operator_id):
"""Does operator_id belong to a FEDAccount?
"""
acc = self._get_account(operator_id)
return (acc is not None) and acc.np_type == self.const.fedaccount_type
# end is_feideuser
def is_localuser(self, operator_id):
"""Does operator_id belong to a VirtAccount?
"""
acc = self._get_account(operator_id)
return (acc is not None) and acc.np_type == self.const.virtaccount_type
    # end is_localuser
def is_sudoer(self, operator_id):
"""Can operator_id change identity to another user?
"""
group = self._get_group(cereconf.BOFHD_SUDOERS_GROUP)
return group.has_member(operator_id)
# end is_sudoer
def can_confirm(self, account_id):
"""Can account_id confirm an operation on a virthome account?
(Operation may be e-mail verification, e-mail change, etc.)
FIXME: We need to decide who could issue virtaccount confirmation
requests. Since a confirmation requires possession of a unique random
        ID, there is no point in restricting this command -- worst-case
scenario a garbage id is fed to bofhd, no big deal.
However, it is entirely possible that we want to restrict confirmation
to some specific web-app-special-system-account.
"""
# everyone can confirm virtaccount operations.
return True
    # end can_confirm
def can_create_fedaccount(self, account_id):
"""Can account_id create a fedaccount?
@type account_id: int
@param account_id
          Account id of the account for which we want to check fedaccount creation
permissions.
"""
# Superusers only can do that.
if self.is_superuser(account_id):
return True
# Allow webapp to do this.
if self.is_sudoer(account_id):
return True
raise PermissionDenied("id=%s cannot create FEDAccounts" % str(account_id))
# end can_create_fedaccount
def can_su(self, account_id, target_id):
"""Can account_id change identity (i.e. UNIX su) to target_id?
"""
if ((self.is_sudoer(account_id) or self.is_superuser(account_id)) and
# don't want to allow su to superuser (i.e. this means that
# superusers WILL NOT BE able to login via web interface)
not self.is_superuser(target_id)):
return True
raise PermissionDenied("id=%s cannot run 'su'" % str(account_id))
# end can_su
def can_nuke_virtaccount(self, account_id, victim_id):
"""Can account_id delete victim_id?
"""
acc = self._get_account(account_id)
victim = self._get_account(victim_id)
assert victim.np_type == self.const.virtaccount_type
# We allow self-deletion.
if (self.is_superuser(acc.entity_id) or
acc.entity_id == victim.entity_id):
return True
raise PermissionDenied("%s (id=%s) cannot delete %s (id=%s)" %
(acc.account_name, acc.entity_id,
victim.account_name, victim.entity_id))
# end can_nuke_virtaccount
def can_nuke_fedaccount(self, account_id, victim_id):
"""Can account_id delete victim_id?
"""
acc = self._get_account(account_id)
victim = self._get_account(victim_id)
assert victim.np_type == self.const.fedaccount_type
# We allow self-deletion and superuser.
if (self.is_superuser(acc.entity_id) or
acc.entity_id == victim.entity_id):
return True
raise PermissionDenied("%s (id=%s) cannot delete %s (id=%s)" %
(acc.account_name, acc.entity_id,
victim.account_name, victim.entity_id))
    # end can_nuke_fedaccount
def can_view_user(self, account_id, victim_id):
"""Can account_id view victim_id's info?
"""
if (self.is_superuser(account_id) or
self.is_sudoer(account_id) or
account_id == victim_id):
return True
raise PermissionDenied("Operation not allowed")
# end can_view_user
def can_create_group(self, account_id, query_run_any=False):
if self.is_superuser(account_id) or self.is_feideuser(account_id):
return True
raise PermissionDenied("Operation not allowed")
# end can_create_group
def can_own_group(self, account_id):
if self.is_superuser(account_id) or self.is_feideuser(account_id):
return True
raise PermissionDenied("Operation not allowed")
# end can_own_group
def can_moderate_group(self, account_id):
"""Can an account be a group moderator?
@type account_id: int
@param account_id
Account id of the account that we want to check moderator
permissions for.
"""
if self.is_superuser(account_id):
return True
account = Factory.get("Account")(self._db)
try:
account.find(account_id)
if account.np_type != self.const.fedaccount_type:
raise PermissionDenied("Account %s (id=%s) cannot moderate "
"VirtGroups" %
(account.account_name, account_id))
return True
except Errors.NotFoundError:
# non-existing accounts cannot do anything :)
raise PermissionDenied("id=%s cannot moderate VirtGroups" %
account_id)
# NOTREACHED
assert False
# end can_moderate_group
def can_change_moderators(self, account_id, group_id):
"""Can an account change (add/remove) moderators from a group?
Group owners and moderators are allowed to alter moderator lists.
"""
return self.can_add_to_group(account_id, group_id)
# end can_change_moderators
def can_change_owners(self, account_id, group_id):
"""Can an account change group_id's owner?
Group owners are allowed to change owners.
"""
# can_delete_group() is available for owners only.
return self.can_force_delete_group(account_id, group_id)
    # end can_change_owners
def can_change_description(self, account_id, group_id):
"""Can an account change group_id's description?
Group owners are allowed to change description.
"""
# can_delete_group() is available for owners only.
return self.can_force_delete_group(account_id, group_id)
    # end can_change_description
def can_change_resource(self, account_id, group_id):
"""Can an account change group_id's resources (url, etc)?
Group owners are allowed to do that.
"""
return self.can_force_delete_group(account_id, group_id)
    # end can_change_resource
def can_manipulate_spread(self, account_id, entity_id):
"""Can an account change entity_id's spreads?
FIXME: Whom do we want to have this permission?
"""
if self.is_superuser(account_id):
return True
raise PermissionDenied("Command restricted to superusers")
# end can_manipulate_spread
def can_view_spreads(self, account_id, entity_id):
"""Can an account see entity_id's spreads?
FIXME: Same as for L{can_manipulate_spreads}
"""
if (self.is_superuser(account_id) or
int(account_id) == int(entity_id)):
return True
raise PermissionDenied("Not allowed to view spreads of id=%s" %
entity_id)
# end can_view_spreads
def can_view_requests(self, account_id):
"""Can an account access pending confirmation requests?
"""
if (self.is_superuser(account_id) or
self.is_sudoer(account_id)):
return True
raise PermissionDenied("Not allowed to view requests")
# end can_view_requests
def can_force_delete_group(self, account_id, group_id):
if self.is_superuser(account_id):
return True
if self._has_target_permissions(account_id,
self.const.auth_create_group,
self.const.auth_target_type_group,
group_id, None):
return True
account = self._get_account(account_id)
group = self._get_group(group_id)
raise PermissionDenied("Account %s (id=%s) cannot delete "
"group %s (id=%s)" %
(account and account.account_name or "N/A",
account_id,
group and group.group_name or "N/A",
group_id))
    # end can_force_delete_group
def can_add_to_group(self, account_id, group_id):
if self.is_superuser(account_id):
return True
if self._is_admin_or_moderator(account_id, group_id):
return True
if self._has_target_permissions(account_id,
self.const.auth_alter_group_membership,
self.const.auth_target_type_group,
group_id, None):
return True
account = self._get_account(account_id)
group = self._get_group(group_id)
raise PermissionDenied("Account %s (id=%s) cannot add members for "
"group %s (id=%s)" %
(account and account.account_name or "N/A",
account_id,
group and group.group_name or "N/A",
group_id))
# end can_add_to_group
def can_remove_from_group(self, operator_id, group_id, target_id):
if self.is_superuser(operator_id):
return True
if self._is_admin_or_moderator(operator_id, group_id):
return True
# We allow a user to remove him/herself from a group.
if operator_id == target_id:
return True
# TODO: Decide if we want to keep special permissions through opsets
if self._has_target_permissions(operator_id,
self.const.auth_alter_group_membership,
self.const.auth_target_type_group,
group_id, None):
return True
account = self._get_account(operator_id)
group = self._get_group(group_id)
raise PermissionDenied("Account %s (id=%s) cannot remove members from "
"group %s (id=%s)" %
(account and account.account_name or "N/A",
operator_id,
group and group.group_name or "N/A",
group_id))
# end can_remove_from_group
def can_show_quarantines(self, operator_id, entity_id):
"""Can operator see entity's quarantines?
"""
if self.is_superuser(operator_id):
return True
if operator_id == entity_id:
return True
raise PermissionDenied("Account %s cannot see id=%s's quarantines" %
(operator_id, entity_id))
# end can_show_quarantines
def can_manipulate_quarantines(self, operator_id, victim_id):
"""Check whether operator can add/remove quarantines on victim.
"""
if self.is_superuser(operator_id):
return True
raise PermissionDenied("Account %s can't manipulate id=%s's quarantines"%
(operator_id, victim_id))
# end can_manipulate_quarantines
def can_show_traits(self, operator_id, entity_id):
"""Check whether operator can see entity_id's traits.
"""
if self.is_superuser(operator_id):
return True
if operator_id == entity_id:
return True
raise PermissionDenied("Account %s cannot see id=%s's quarantines" %
(operator_id, entity_id))
    # end can_show_traits
def can_manipulate_traits(self, operator_id, victim_id):
if self.is_superuser(operator_id):
return True
raise PermissionDenied("Account %s can't manipulate id=%s's traits" %
(operator_id, victim_id))
# end can_manipulate_traits
# end class BofhdAuth
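
# Illustrative usage sketch (an assumption, not part of the original module):
# bofhd command implementations are expected to instantiate this class with a
# database handle and call the can_* helpers, treating PermissionDenied as "no":
#
#   db = Factory.get("Database")()
#   ba = BofhdVirtHomeAuth(db)
#   try:
#       ba.can_create_group(operator_id)
#   except PermissionDenied:
#       ...  # translate into an error reply for the client
#
# operator_id here is a placeholder for however the caller tracks the logged-in
# account's entity id.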
|
kamyu104/LeetCode
|
refs/heads/master
|
Python/remove-nth-node-from-end-of-list.py
|
2
|
from __future__ import print_function
# Time: O(n)
# Space: O(1)
#
# Given a linked list, remove the nth node from the end of list and return its head.
#
# For example,
#
# Given linked list: 1->2->3->4->5, and n = 2.
#
# After removing the second node from the end, the linked list becomes 1->2->3->5.
# Note:
# Given n will always be valid.
# Try to do this in one pass.
#
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
def __repr__(self):
if self is None:
return "Nil"
else:
return "{} -> {}".format(self.val, repr(self.next))
class Solution:
# @return a ListNode
def removeNthFromEnd(self, head, n):
dummy = ListNode(-1)
dummy.next = head
slow, fast = dummy, dummy
for i in xrange(n):
fast = fast.next
while fast.next:
slow, fast = slow.next, fast.next
slow.next = slow.next.next
return dummy.next
if __name__ == "__main__":
head = ListNode(1)
head.next = ListNode(2)
head.next.next = ListNode(3)
head.next.next.next = ListNode(4)
head.next.next.next.next = ListNode(5)
print(Solution().removeNthFromEnd(head, 2))
|
kjung/scikit-learn
|
refs/heads/master
|
examples/svm/plot_svm_regression.py
|
120
|
"""
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###############################################################################
# Generate sample data
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
###############################################################################
# look at the results
lw = 2
plt.scatter(X, y, color='darkorange', label='data')
plt.hold('on')
plt.plot(X, y_rbf, color='navy', lw=lw, label='RBF model')
plt.plot(X, y_lin, color='c', lw=lw, label='Linear model')
plt.plot(X, y_poly, color='cornflowerblue', lw=lw, label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
|
d4nnyk/reverse
|
refs/heads/master
|
reverse/lib/graph.py
|
1
|
#!/usr/bin/env python3
#
# Reverse : Generate an indented asm code (pseudo-C) with colored syntax.
# Copyright (C) 2015 Joel
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from time import time
from reverse.lib.utils import BRANCH_NEXT, BRANCH_NEXT_JUMP, debug__, list_starts_with
# For the loop's analysis
MAX_NODES = 800
class Graph:
def __init__(self, dis, entry_point_addr):
# Each node contains a block (list) of instructions.
self.nodes = {}
# For each address block, we store a list of next blocks.
        # If there are 2 elements it means that the preceding instruction
        # was a conditional jump:
# 1st : direct next instruction
# 2nd : for conditional jump : address of the jump
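        # Illustrative example (not from the original code): for a conditional
        # jump at 0x400000 that falls through to 0x400010 and targets 0x400020,
        # this dict would hold link_out[0x400000] == [0x400010, 0x400020],
        # indexed by BRANCH_NEXT and BRANCH_NEXT_JUMP respectively.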
self.link_out = {}
self.link_in = {}
self.entry_point_addr = entry_point_addr
self.dis = dis
        # For one loop: contains all addresses of the loop only
self.loops_set = {}
        # For one loop: contains all addresses of the loop and sub-loops
self.loops_all = {}
        # All remaining addresses which are not in any loop
self.not_in_loop = set()
self.loops_start = set()
# Optimization
self.cond_jumps_set = set()
self.uncond_jumps_set = set()
self.equiv = {}
self.false_loops = set()
# Loop dependencies
self.deps = {}
self.rev_deps = {}
self.cache_path_exists = {}
        # For each loop we search for the last node such that, once we enter it,
        # we are sure to return to the loop.
self.last_loop_node = {}
self.all_deep_equiv = set()
self.skipped_loops_analysis = False
# A jump is normally alone in a block, but for some architectures
    # we also save the prefetched instruction that follows it.
def new_node(self, curr, prefetch, nxt):
ad = curr.address
self.nodes[ad] = [curr]
if nxt is not None:
self.link_out[ad] = nxt
if nxt is not None:
for n in nxt:
if n not in self.link_in:
self.link_in[n] = [ad]
else:
self.link_in[n].append(ad)
if prefetch is not None:
self.nodes[ad].append(prefetch)
def exists(self, inst):
return inst.address in self.nodes
    # Concatenate instructions into a single block;
    # jumps are kept in separate blocks
def simplify(self):
nodes = list(self.nodes.keys())
start = time()
for ad in nodes:
if ad in self.uncond_jumps_set or ad in self.cond_jumps_set:
continue
if ad not in self.link_in or len(self.link_in[ad]) != 1 or \
ad == self.entry_point_addr:
continue
pred = self.link_in[ad][0]
# don't fuse with jumps
if pred in self.uncond_jumps_set or pred in self.cond_jumps_set:
continue
if pred not in self.link_out or len(self.link_out[pred]) != 1:
continue
if ad in self.link_out:
self.link_out[pred] = self.link_out[ad]
else:
del self.link_out[pred]
self.nodes[pred] += self.nodes[ad]
if ad in self.link_out:
del self.link_out[ad]
del self.link_in[ad]
del self.nodes[ad]
            # replace all addresses which refer to ad
for k, lst_i in self.link_in.items():
if ad in lst_i:
lst_i[lst_i.index(ad)] = pred
elapsed = time()
elapsed = elapsed - start
debug__("Graph simplified in %fs (%d nodes)" % (elapsed, len(self.nodes)))
def dot_loop_deps(self):
output = open("graph_loop_deps.dot", "w+")
output.write('digraph {\n')
output.write('node [fontname="liberation mono" style=filled fillcolor=white shape=box];\n')
for k, dp in self.deps.items():
output.write('node_%x_%x [label="(%x, %x)"' % (k[0], k[1], k[0], k[1]))
if k in self.false_loops:
output.write(' fillcolor="#B6FFDD"')
if k in self.all_deep_equiv:
output.write(' color="#ff0000"')
output.write('];\n')
for sub in dp:
output.write('node_%x_%x -> node_%x_%x;\n'
% (k[0], k[1], sub[0], sub[1]))
output.write('}\n')
output.close()
def dot_graph(self, jmptables):
output = open("graph.dot", "w+")
output.write('digraph {\n')
# output.write('graph [bgcolor="#aaaaaa" pad=20];\n')
# output.write('node [fontname="liberation mono" style=filled fillcolor="#333333" fontcolor="#d3d3d3" shape=box];\n')
output.write('node [fontname="liberation mono" style=filled fillcolor=white shape=box];\n')
keys = list(self.nodes.keys())
keys.sort()
for k in keys:
lst_i = self.nodes[k]
output.write('node_%x [label="' % k)
for i in lst_i:
output.write('0x%x: %s %s\\l' % (i.address, i.mnemonic, i.op_str))
output.write('"')
if k in self.loops_start:
output.write(' fillcolor="#FFFCC4"')
elif k not in self.link_out:
output.write(' fillcolor="#ff7777"')
elif k not in self.link_in:
output.write(' fillcolor="#B6FFDD"')
output.write('];\n')
for k, i in self.link_out.items():
if k in jmptables:
for ad in jmptables[k].table:
output.write('node_%x -> node_%x;\n' % (k, ad))
elif len(i) == 2:
# true green branch (jump is taken)
output.write('node_%x -> node_%x [color="#58DA9C"];\n'
% (k, i[BRANCH_NEXT_JUMP]))
# false red branch (jump is not taken)
output.write('node_%x -> node_%x [color="#ff7777"];\n'
% (k, i[BRANCH_NEXT]))
else:
output.write('node_%x -> node_%x;\n' % (k, i[BRANCH_NEXT]))
output.write('}')
output.close()
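    # Rendering sketch (not part of the original module): the file written by
    # dot_graph() can be converted with graphviz, assuming the 'dot' binary is
    # installed, e.g.:
    #
    #   dot -Tsvg graph.dot -o graph.svg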
def __search_last_loop_node(self, visited, l_prev_loop, l_start, l_set):
def __rec_search(ad):
for prev in self.link_in[ad]:
nxt = self.link_out[prev]
for n in nxt:
if n not in l_set:
if ad not in self.last_loop_node:
self.last_loop_node[ad] = set()
self.last_loop_node[ad].add((l_prev_loop, l_start))
return
if ad in visited:
return
visited.add(ad)
for prev in self.link_in[ad]:
__rec_search(prev)
# start from the end of the loop
ad = l_start
visited.add(ad)
for prev in self.link_in[l_start]:
if prev in l_set:
__rec_search(prev)
def __is_inf_loop(self, l_set):
for ad in l_set:
if ad in self.link_out:
for nxt in self.link_out[ad]:
if nxt not in l_set:
return False
return True
def path_exists(self, from_addr, to_addr):
def __rec_path_exists(curr, local_visited):
stack = [curr]
while stack:
curr = stack.pop(-1)
if curr == to_addr:
return True
if curr in local_visited:
continue
local_visited.add(curr)
if curr not in self.link_out:
continue
for n in self.link_out[curr]:
stack.append(n)
return False
if (from_addr, to_addr) in self.cache_path_exists:
return self.cache_path_exists[(from_addr, to_addr)]
local_visited = set()
res = __rec_path_exists(from_addr, local_visited)
self.cache_path_exists[(from_addr, to_addr)] = res
return res
    # Returns a set containing every address which is on a path from
    # 'from_addr' to 'to_addr'.
def find_paths(self, from_addr, to_addr, global_visited):
def __rec_find_paths(curr, local_visited, path_set):
nonlocal isfirst
if curr == to_addr and not isfirst:
path_set.add(curr)
return
isfirst = False
if curr in local_visited:
return
local_visited.add(curr)
if curr in global_visited or curr not in self.link_out:
return
for n in self.link_out[curr]:
__rec_find_paths(n, local_visited, path_set)
if n in path_set:
path_set.add(curr)
isfirst = True
path_set = set()
local_visited = set()
__rec_find_paths(from_addr, local_visited, path_set)
return path_set
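    # Usage sketch (not part of the original module): find_paths() is what
    # __try_find_loops() relies on to detect a loop head, by searching paths
    # from an address back to itself. With a hypothetical instance 'gph' of
    # this class:
    #
    #   body = gph.find_paths(0x100, 0x100, set())
    #   # 'body' is empty if 0x100 is not a loop head, otherwise it contains
    #   # every address of the loop.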
def __try_find_loops(self, entry, waiting, par_loops, l_set, is_sub_loop):
detected_loops = {}
keys = set(waiting.keys())
for ad in keys:
if l_set is not None and ad not in l_set:
continue
if (entry, ad) in self.loops_set:
continue
l = self.find_paths(ad, ad, par_loops)
# If the set is empty, it's not a loop
if l:
self.loops_set[(entry, ad)] = l
is_sub_loop.add(ad)
detected_loops[ad] = (entry, ad)
return detected_loops
def __manage_waiting(self, stack, visited, waiting, l_set, done):
keys = set(waiting.keys())
for ad in keys:
if l_set is not None and ad not in l_set:
continue
if len(waiting[ad]) == 0:
del waiting[ad]
done.add(ad)
stack.append((-1, ad))
def __until_stack_empty(self, stack, waiting, visited,
par_loops, l_set, is_sub_loop, done):
has_moved = False
while stack:
prev, ad = stack.pop(-1)
if ad in self.link_in and ad not in done:
l_in = self.link_in[ad]
if len(l_in) > 1 or l_set is not None and ad not in l_set:
if ad in waiting:
if prev in waiting[ad]:
waiting[ad].remove(prev)
else:
unseen = set(l_in)
unseen.remove(prev)
waiting[ad] = unseen
continue
if ad in visited:
continue
visited.add(ad)
if ad in self.link_out:
for n in self.link_out[ad]:
if n in par_loops:
continue
stack.append((ad, n))
has_moved = True
return has_moved
def __get_new_loops(self, waiting, detected_loops, l_set, is_sub_loop):
new_loops = set()
        # Remove internal links to the beginning of the loop.
        # If we enter the loop later, it means that len(waiting[ad]) == 0.
for ad, k in detected_loops.items():
loop = self.loops_set[k]
was_removed = False
for rest in set(waiting[ad]):
if rest in loop:
waiting[ad].remove(rest)
was_removed = True
if was_removed:
if len(waiting[ad]) == 0:
new_loops.add(ad)
del waiting[ad]
# Remove external jumps which are outside the current loop
for ad, unseen in waiting.items():
if l_set is not None and ad not in l_set:
continue
for i in set(unseen):
if l_set is not None and i not in l_set:
unseen.remove(i)
return new_loops
def __explore(self, entry, par_loops, visited, waiting, l_set, done):
stack = []
# Check if the first address (entry point of the function) is the
# beginning of a loop.
if not visited and entry in self.link_in:
for p in self.link_in[entry]:
stack.append((p, entry))
else:
if entry in self.link_out:
for n in self.link_out[entry]:
stack.append((entry, n))
visited.add(entry)
is_sub_loop = set()
while 1:
if self.__until_stack_empty(
stack, waiting, visited, par_loops, l_set, is_sub_loop, done):
self.__manage_waiting(stack, visited, waiting, l_set, done)
continue
detected_loops = self.__try_find_loops(
entry, waiting, par_loops, l_set, is_sub_loop)
new_loops = self.__get_new_loops(
waiting, detected_loops, l_set, is_sub_loop)
while new_loops:
# Follow loops
for ad in new_loops:
                    # TODO: optimize
v = set(visited)
v.add(ad)
pl = set(par_loops)
pl.add(ad)
l = self.loops_set[(entry, ad)]
self.__explore(ad, pl, v, waiting, l, set(done))
detected_loops = self.__try_find_loops(
entry, waiting, par_loops, l_set, is_sub_loop)
new_loops = self.__get_new_loops(
waiting, detected_loops, l_set, is_sub_loop)
self.__manage_waiting(stack, visited, waiting, l_set, done)
if not stack:
break
        # Now for each current loop, we add the content of each sub-loop.
        # It means that a loop contains all its sub-loops (which is not the case
        # in loops_set: it contains only the current loop).
for ad in is_sub_loop:
loop = set(self.loops_set[(entry, ad)])
self.loops_all[(entry, ad)] = loop
self.deps[(entry, ad)] = set()
for (prev, start), l in self.loops_set.items():
# Skip current loop
if (prev, start) == (entry, ad):
continue
                # Is it a sub-loop?
if prev == ad and start != entry and start in loop:
k1 = (entry, ad)
k2 = (prev, start)
if k2 not in self.rev_deps:
self.rev_deps[k2] = set()
self.rev_deps[k2].add(k1)
self.deps[k1].add(k2)
self.loops_all[(entry, ad)].update(self.loops_all[(prev, start)])
def all_false(self, loops_key):
for k in loops_key:
if k not in self.false_loops:
return False
return True
    # Recursively mark parent loops
def __rec_mark_parent_false(self, k):
self.false_loops.add(k)
if k not in self.rev_deps:
return
for par in self.rev_deps[k]:
if par in self.false_loops:
continue
if self.all_false(self.deps[par]):
self.__rec_mark_parent_false(par)
    # Recursively mark sub-loops
def __rec_mark_sub_false(self, k):
self.false_loops.add(k)
for sub in self.deps[k]:
if sub in self.false_loops:
continue
self.__rec_mark_sub_false(sub)
def __yield_cmp_loops(self, keys1, not_in_false=True):
        # optimization: don't compare two loops twice
keys2 = set(keys1)
for k1 in keys1:
keys2.remove(k1)
if not_in_false and k1 in self.false_loops:
continue
for k2 in keys2:
if not_in_false and k2 in self.false_loops:
continue
yield k1, k2
def __search_false_loops(self):
#
# Try to detect "strange" loops:
#
# example :
#
# if {
# goto label
# }
#
# while {
# if {
# statement_1
# label:
# statement_2
# } else {
# statement_3
# }
# }
#
# Check for example gotoinloop6 to see the result.
#
for (prev1, start1), (prev2, start2) in \
self.__yield_cmp_loops(self.loops_all.keys()):
l1 = self.loops_set[(prev1, start1)]
l2 = self.loops_set[(prev2, start2)]
if prev2 in l1 and \
start2 in l1 and \
start1 in l2:
if l2.issubset(l1):
self.__rec_mark_parent_false((prev2, start2))
self.__rec_mark_sub_false((prev2, start2))
elif prev1 in l2 and \
start1 in l2 and \
start2 in l1:
if l1.issubset(l2):
self.__rec_mark_parent_false((prev1, start1))
self.__rec_mark_sub_false((prev1, start1))
def __search_same_deep_equiv_loops(self):
#
        # Search for equivalent loops at the same depth, but compare with
        # 'loops_all' -> each item contains all sub-loops, instead of
        # 'loops_set' which contains only the loop itself.
#
# example:
#
# loop1
# / \
# loop2 loop3
#
        # If loops_all[loop2] == loops_all[loop3], and if loop2 or loop3 is
        # in false_loops, we remove these loops.
#
def do_add(k1, k2):
nonlocal idx_count, set_index, deep_equiv
l1 = self.loops_all[k1]
l2 = self.loops_all[k2]
if l1 == l2:
if k1 in set_index:
i = set_index[k1]
deep_equiv[i].add(k2)
self.all_deep_equiv.add(k2)
set_index[k2] = i
elif k2 in set_index:
i = set_index[k2]
deep_equiv[i].add(k1)
self.all_deep_equiv.add(k1)
set_index[k1] = i
else:
i = idx_count
idx_count += 1
deep_equiv[i] = {k1, k2}
set_index[k1] = i
set_index[k2] = i
self.all_deep_equiv.add(k1)
self.all_deep_equiv.add(k2)
set_index = {}
deep_equiv = {}
idx_count = 0
for k in self.deps:
for k1, k2 in self.__yield_cmp_loops(self.deps[k], False):
do_add(k1, k2)
for k1, k2 in self.__yield_cmp_loops(self.roots, False):
do_add(k1, k2)
if not deep_equiv:
return
last_length = 0
while last_length != len(self.false_loops):
last_length = len(self.false_loops)
for i, keys in deep_equiv.items():
nb_false = 0
for k in keys:
if k in self.false_loops:
nb_false += 1
if nb_false > 0:
for k in set(keys):
if k in self.false_loops:
continue
subs = self.deps[k]
if len(subs) == 0 or self.all_false(subs):
keys.remove(k)
self.__rec_mark_parent_false(k)
def __prune_loops(self):
def set_paths(k, p):
nonlocal deps, loop_paths
loop_paths[k].append(p)
i = 0
for sub in deps[k]:
set_paths(sub, p + [i])
i += 1
#
# Create loop paths
# example:
#
# loop1
# / \
# loop2 loop3
#
# paths:
# loop1 = [0]
# loop2 = [0, 0]
# loop3 = [0, 1]
#
deps = self.deps
loop_paths = {}
for k in deps:
deps[k] = list(deps[k])
loop_paths[k] = []
i = 0
for k in self.roots:
set_paths(k, [i])
i += 1
        # If there is more than one path for a loop, it means
        # that the loop has more than one parent. The goal is to
        # determine which parent is "wrong". If there are two parents
        # we can't say anything.
prefix_to_remove = []
for k, paths in loop_paths.items():
if len(paths) > 2:
stop_at_first_diff = False
i = 0
prefix = []
while not stop_at_first_diff:
count = {}
for p in paths:
curr = p[i]
if curr in count:
count[curr] += 1
else:
count[curr] = 1
if len(count) > 1:
# Keep only the parent loop which has only one reference
# to this loop (and ONLY this loop must have ONLY ONE
# reference).
n = 0
for idx, c in count.items():
if c == 1:
n += 1
if n == 1:
                            # Remove all other loops
for loop_idx, c in count.items():
if c != 1:
prefix.append(loop_idx)
if prefix not in prefix_to_remove:
prefix_to_remove.append(prefix)
stop_at_first_diff = True
else:
# here len(count) == 1
prefix.append(curr)
i += 1
        # Remove all loops which start with one of these prefixes
# if prefix_to_remove:
# debug__(loop_paths)
# for prefix in prefix_to_remove:
# debug__("prune %s" % repr(prefix))
for k, paths in loop_paths.items():
if k in self.false_loops:
continue
all_matches = True
for p in paths:
one_match = False
for prefix in prefix_to_remove:
if list_starts_with(p, prefix):
one_match = True
break
if not one_match:
all_matches = False
break
if all_matches:
self.false_loops.add(k)
def __update_loops(self):
def rec_remove(k):
if k not in self.loops_all:
return
del self.loops_all[k]
del self.loops_set[k]
for sub in self.deps[k]:
if sub in self.false_loops:
rec_remove(sub)
for k in self.false_loops:
if k not in self.rev_deps or k in self.all_deep_equiv:
rec_remove(k)
def loop_detection(self, entry, bypass_false_search=False):
start = time()
        # Equivalent loops at the same depth in the loop dependency tree
self.deep_equiv = set()
        # For one loop: contains all addresses of the loop only
        self.loops_set = {}
        # For one loop: contains all addresses of the loop and its sub-loops
        self.loops_all = {}
# Loop dependencies
self.deps = {}
self.rev_deps = {}
# Loops marked as "False"
self.false_loops = set()
if len(self.nodes) > MAX_NODES:
self.skipped_loops_analysis = True
return
self.__explore(entry, set(), set(), {}, None, set())
self.roots = self.loops_set.keys() - self.rev_deps.keys()
self.__prune_loops()
if not bypass_false_search:
self.__search_false_loops()
self.__search_same_deep_equiv_loops()
self.__update_loops()
        # Compute all addresses which are not in a loop
in_loop = set()
for l in self.loops_set.items():
in_loop.update(l[1])
        # The rest: all addresses which are not in a loop
self.not_in_loop = self.nodes.keys() - in_loop
        # Search for infinite loops
self.infinite_loop = set()
for l_curr_loop, l_set in self.loops_all.items():
if self.__is_inf_loop(l_set):
self.infinite_loop.add(l_curr_loop)
# Save first address of loops
self.loops_start = set()
for _, l_start in self.loops_all:
self.loops_start.add(l_start)
        # For each loop we search the last node such that, once we enter it,
        # we are sure to return to the loop.
self.last_loop_node = {}
for (l_prev_loop, l_start), l_set in self.loops_all.items():
self.last_loop_node[(l_prev_loop, l_start)] = set()
self.__search_last_loop_node(set(), l_prev_loop, l_start, l_set)
elapsed = time()
elapsed = elapsed - start
debug__("Exploration: found %d loop(s) in %fs" %
(len(self.loops_all), elapsed))
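# Overall usage sketch (not part of the original module): the graph is normally
# filled by the disassembler through new_node(); one plausible sequence is then:
#
#   gph.simplify()                      # fuse consecutive instructions into blocks
#   gph.loop_detection(entry_point)     # compute loops_set / loops_all / false_loops
#   gph.dot_graph(jmptables={})         # optional: dump graph.dot for debugging
#
# 'gph' and 'entry_point' are hypothetical names used only for illustration.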
|
barryrobison/arsenalsuite
|
refs/heads/master
|
cpp/lib/stonegui/__init__.py
|
211
|
import build
|
dhiltgen/docker-registry
|
refs/heads/master
|
docker_registry/__init__.py
|
77
|
# -*- coding: utf-8 -*-
try:
import pkg_resources
pkg_resources.declare_namespace(__name__)
except ImportError:
import pkgutil
__path__ = pkgutil.extend_path(__path__, __name__)
|
groschovskiy/lerigos_music
|
refs/heads/master
|
Server/API/lib/system_tests/__init__.py
|
77
|
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
abantam/pmtud
|
refs/heads/master
|
nsc/scons-local-1.2.0.d20090223/SCons/Tool/MSCommon/vs.py
|
19
|
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/MSCommon/vs.py 4043 2009/02/23 09:06:45 scons"
__doc__ = """Module to detect Visual Studio and/or Visual C/C++
"""
import os
import SCons.Errors
import SCons.Util
from SCons.Tool.MSCommon.common import debug, \
read_reg, \
normalize_env, \
get_output, \
parse_output
class VisualStudio:
"""
An abstract base class for trying to find installed versions of
Visual Studio.
"""
def __init__(self, version, **kw):
self.version = version
self.__dict__.update(kw)
self._cache = {}
#
def find_batch_file(self):
"""Try to find the Visual Studio or Visual C/C++ batch file.
Return None if failed or the batch file does not exist.
"""
pdir = self.get_vc_product_dir()
if not pdir:
debug('find_batch_file(): no pdir')
return None
batch_file = os.path.normpath(os.path.join(pdir, self.batch_file))
batch_file = os.path.normpath(batch_file)
if not os.path.isfile(batch_file):
debug('find_batch_file(): %s not on file system' % batch_file)
return None
return batch_file
def find_executable(self):
pdir = self.get_vc_product_dir()
if not pdir:
debug('find_executable(): no pdir')
return None
executable = os.path.join(pdir, self.executable_path)
executable = os.path.normpath(executable)
if not os.path.isfile(executable):
debug('find_executable(): %s not on file system' % executable)
return None
return executable
def find_vc_product_dir(self):
if not SCons.Util.can_read_reg:
debug('find_vc_product_dir(): can not read registry')
return None
key = self.hkey_root + '\\' + self.vc_product_dir_key
try:
comps = read_reg(key)
except WindowsError, e:
debug('find_vc_product_dir(): no registry key %s' % key)
else:
if self.batch_file_dir_reg_relpath:
comps = os.path.join(comps, self.batch_file_dir_reg_relpath)
comps = os.path.normpath(comps)
if os.path.exists(comps):
return comps
else:
debug('find_vc_product_dir(): %s not on file system' % comps)
d = os.environ.get(self.common_tools_var)
if not d:
msg = 'find_vc_product_dir(): no %s variable'
debug(msg % self.common_tools_var)
return None
if not os.path.isdir(d):
debug('find_vc_product_dir(): %s not on file system' % d)
return None
if self.batch_file_dir_env_relpath:
d = os.path.join(d, self.batch_file_dir_env_relpath)
d = os.path.normpath(d)
return d
#
def get_batch_file(self):
try:
return self._cache['batch_file']
except KeyError:
batch_file = self.find_batch_file()
self._cache['batch_file'] = batch_file
return batch_file
def get_executable(self):
try:
return self._cache['executable']
except KeyError:
executable = self.find_executable()
self._cache['executable'] = executable
return executable
def get_supported_arch(self):
try:
return self._cache['supported_arch']
except KeyError:
# RDEVE: for the time being use hardcoded lists
# supported_arch = self.find_supported_arch()
self._cache['supported_arch'] = self.supported_arch
return self.supported_arch
def get_vc_product_dir(self):
try:
return self._cache['vc_product_dir']
except KeyError:
vc_product_dir = self.find_vc_product_dir()
self._cache['vc_product_dir'] = vc_product_dir
return vc_product_dir
def reset(self):
self._cache = {}
# The list of supported Visual Studio versions we know how to detect.
#
# How to look for the .bat file?
# - VS 2008 Express (x86):
# * from registry key productdir, gives the full path to vsvarsall.bat. In
# HKEY_LOCAL_MACHINE):
#      Software\Microsoft\VCExpress\9.0\Setup\VC\productdir
#    * from environment variable VS90COMNTOOLS: the path is then ..\..\VC
#      relative to the path given by the variable.
#
# - VS 2008 Express (WoW6432: 32 bits on windows x64):
#      Software\Wow6432Node\Microsoft\VCExpress\9.0\Setup\VC\productdir
#
# - VS 2005 Express (x86):
# * from registry key productdir, gives the full path to vsvarsall.bat. In
# HKEY_LOCAL_MACHINE):
#      Software\Microsoft\VCExpress\8.0\Setup\VC\productdir
#    * from environment variable VS80COMNTOOLS: the path is then ..\..\VC
#      relative to the path given by the variable.
#
# - VS 2005 Express (WoW6432: 32 bits on windows x64): does not seem to have a
# productdir ?
#
# - VS 2003 .Net (pro edition ? x86):
# * from registry key productdir. The path is then ..\Common7\Tools\
#      relative to the key. The key is in HKEY_LOCAL_MACHINE):
# Software\Microsoft\VisualStudio\7.1\Setup\VC\productdir
#    * from environment variable VS71COMNTOOLS: the path is the full path to
# vsvars32.bat
#
# - VS 98 (VS 6):
# * from registry key productdir. The path is then Bin
#      relative to the key. The key is in HKEY_LOCAL_MACHINE):
# Software\Microsoft\VisualStudio\6.0\Setup\VC98\productdir
#
# The first version found in the list is the one used by default if
# there are multiple versions installed. Barring good reasons to
# the contrary, this means we should list versions from most recent
# to oldest. Pro versions get listed before Express versions on the
# assumption that, by default, you'd rather use the version you paid
# good money for in preference to whatever Microsoft makes available
# for free.
#
# If you update this list, update the documentation in Tool/msvs.xml.
SupportedVSList = [
# Visual Studio 2010
# TODO: find the settings, perhaps from someone with a CTP copy?
#VisualStudio('TBD',
# hkey_root=r'TBD',
# common_tools_var='TBD',
# batch_file='TBD',
# vc_product_dir_key=r'TBD',
# batch_file_dir_reg_relpath=None,
# batch_file_dir_env_relpath=r'TBD',
# executable_path=r'TBD',
# default_dirname='TBD',
#),
# Visual Studio 2008
# The batch file we look for is in the VC directory,
# so the devenv.com executable is up in ..\..\Common7\IDE.
VisualStudio('9.0',
hkey_root=r'Software\Microsoft\VisualStudio\9.0',
common_tools_var='VS90COMNTOOLS',
batch_file='vcvarsall.bat',
vc_product_dir_key=r'Setup\VC\ProductDir',
batch_file_dir_reg_relpath=None,
batch_file_dir_env_relpath=r'..\..\VC',
executable_path=r'..\Common7\IDE\devenv.com',
default_dirname='Microsoft Visual Studio 9',
supported_arch=['x86', 'amd64'],
),
# Visual C++ 2008 Express Edition
# The batch file we look for is in the VC directory,
# so the VCExpress.exe executable is up in ..\..\Common7\IDE.
VisualStudio('9.0Exp',
hkey_root=r'Software\Microsoft\VisualStudio\9.0',
common_tools_var='VS90COMNTOOLS',
batch_file='vcvarsall.bat',
vc_product_dir_key=r'Setup\VC\ProductDir',
batch_file_dir_reg_relpath=None,
batch_file_dir_env_relpath=r'..\..\VC',
executable_path=r'..\Common7\IDE\VCExpress.exe',
default_dirname='Microsoft Visual Studio 9',
supported_arch=['x86'],
),
# Visual Studio 2005
# The batch file we look for is in the VC directory,
# so the devenv.com executable is up in ..\..\Common7\IDE.
VisualStudio('8.0',
hkey_root=r'Software\Microsoft\VisualStudio\8.0',
common_tools_var='VS80COMNTOOLS',
batch_file='vcvarsall.bat',
vc_product_dir_key=r'Setup\VC\ProductDir',
batch_file_dir_reg_relpath=None,
batch_file_dir_env_relpath=r'..\..\VC',
executable_path=r'..\Common7\IDE\devenv.com',
default_dirname='Microsoft Visual Studio 8',
supported_arch=['x86', 'amd64'],
),
# Visual C++ 2005 Express Edition
# The batch file we look for is in the VC directory,
# so the VCExpress.exe executable is up in ..\..\Common7\IDE.
VisualStudio('8.0Exp',
hkey_root=r'Software\Microsoft\VCExpress\8.0',
common_tools_var='VS80COMNTOOLS',
batch_file='vcvarsall.bat',
vc_product_dir_key=r'Setup\VC\ProductDir',
batch_file_dir_reg_relpath=None,
batch_file_dir_env_relpath=r'..\..\VC',
                 # The batch file is in the VC directory, so
                 # the VCExpress.exe executable is next door in ..\IDE.
executable_path=r'..\Common7\IDE\VCExpress.exe',
default_dirname='Microsoft Visual Studio 8',
supported_arch=['x86'],
),
# Visual Studio .NET 2003
# The batch file we look for is in the Common7\Tools directory,
# so the devenv.com executable is next door in ..\IDE.
VisualStudio('7.1',
hkey_root=r'Software\Microsoft\VisualStudio\7.1',
common_tools_var='VS71COMNTOOLS',
batch_file='vsvars32.bat',
vc_product_dir_key=r'Setup\VC\ProductDir',
batch_file_dir_reg_relpath=r'..\Common7\Tools',
batch_file_dir_env_relpath=None,
executable_path=r'..\IDE\devenv.com',
default_dirname='Microsoft Visual Studio .NET',
supported_arch=['x86'],
),
# Visual Studio .NET
# The batch file we look for is in the Common7\Tools directory,
# so the devenv.com executable is next door in ..\IDE.
VisualStudio('7.0',
hkey_root=r'Software\Microsoft\VisualStudio\7.0',
common_tools_var='VS70COMNTOOLS',
batch_file='vsvars32.bat',
vc_product_dir_key=r'Setup\VC\ProductDir',
batch_file_dir_reg_relpath=r'..\Common7\Tools',
batch_file_dir_env_relpath=None,
executable_path=r'..\IDE\devenv.com',
default_dirname='Microsoft Visual Studio .NET',
supported_arch=['x86'],
),
# Visual Studio 6.0
VisualStudio('6.0',
hkey_root=r'Software\Microsoft\VisualStudio\6.0',
common_tools_var='VS60COMNTOOLS',
batch_file='vcvars32.bat',
vc_product_dir_key='Setup\Microsoft Visual C++\ProductDir',
batch_file_dir_reg_relpath='Bin',
batch_file_dir_env_relpath=None,
executable_path=r'Common\MSDev98\Bin\MSDEV.COM',
default_dirname='Microsoft Visual Studio',
supported_arch=['x86'],
),
]
SupportedVSMap = {}
for vs in SupportedVSList:
SupportedVSMap[vs.version] = vs
# Finding installed versions of Visual Studio isn't cheap, because it
# goes not only to the registry but also to the disk to sanity-check
# that there is, in fact, a Visual Studio directory there and that the
# registry entry isn't just stale. Find this information once, when
# requested, and cache it.
InstalledVSList = None
InstalledVSMap = None
def get_installed_visual_studios():
global InstalledVSList
global InstalledVSMap
if InstalledVSList is None:
InstalledVSList = []
InstalledVSMap = {}
for vs in SupportedVSList:
debug('trying to find VS %s' % vs.version)
if vs.get_executable():
debug('found VS %s' % vs.version)
InstalledVSList.append(vs)
InstalledVSMap[vs.version] = vs
return InstalledVSList
def reset_installed_visual_studios():
global InstalledVSList
global InstalledVSMap
InstalledVSList = None
InstalledVSMap = None
for vs in SupportedVSList:
vs.reset()
# We may be asked to update multiple construction environments with
# SDK information. When doing this, we check on-disk for whether
# the SDK has 'mfc' and 'atl' subdirectories. Since going to disk
# is expensive, cache results by directory.
#SDKEnvironmentUpdates = {}
#
#def set_sdk_by_directory(env, sdk_dir):
# global SDKEnvironmentUpdates
# try:
# env_tuple_list = SDKEnvironmentUpdates[sdk_dir]
# except KeyError:
# env_tuple_list = []
# SDKEnvironmentUpdates[sdk_dir] = env_tuple_list
#
# include_path = os.path.join(sdk_dir, 'include')
# mfc_path = os.path.join(include_path, 'mfc')
# atl_path = os.path.join(include_path, 'atl')
#
# if os.path.exists(mfc_path):
# env_tuple_list.append(('INCLUDE', mfc_path))
# if os.path.exists(atl_path):
# env_tuple_list.append(('INCLUDE', atl_path))
# env_tuple_list.append(('INCLUDE', include_path))
#
# env_tuple_list.append(('LIB', os.path.join(sdk_dir, 'lib')))
# env_tuple_list.append(('LIBPATH', os.path.join(sdk_dir, 'lib')))
# env_tuple_list.append(('PATH', os.path.join(sdk_dir, 'bin')))
#
# for variable, directory in env_tuple_list:
# env.PrependENVPath(variable, directory)
def detect_msvs():
return (len(get_installed_visual_studios()) > 0)
def get_vs_by_version(msvs):
if not SupportedVSMap.has_key(msvs):
msg = "Visual Studio version %s is not supported" % repr(msvs)
raise SCons.Errors.UserError, msg
get_installed_visual_studios()
vs = InstalledVSMap.get(msvs)
# Some check like this would let us provide a useful error message
# if they try to set a Visual Studio version that's not installed.
# However, we also want to be able to run tests (like the unit
# tests) on systems that don't, or won't ever, have it installed.
# It might be worth resurrecting this, with some configurable
# setting that the tests can use to bypass the check.
#if not vs:
# msg = "Visual Studio version %s is not installed" % repr(msvs)
# raise SCons.Errors.UserError, msg
return vs
def get_default_version(env):
"""Returns the default version string to use for MSVS.
If no version was requested by the user through the MSVS environment
    variable, query all the available visual studios through
query_versions, and take the highest one.
Return
------
version: str
the default version.
"""
if not env.has_key('MSVS') or not SCons.Util.is_Dict(env['MSVS']):
# TODO(1.5):
#versions = [vs.version for vs in get_installed_visual_studios()]
versions = map(lambda vs: vs.version, get_installed_visual_studios())
env['MSVS'] = {'VERSIONS' : versions}
else:
versions = env['MSVS'].get('VERSIONS', [])
if not env.has_key('MSVS_VERSION'):
if versions:
env['MSVS_VERSION'] = versions[0] #use highest version by default
else:
env['MSVS_VERSION'] = SupportedVSList[0].version
env['MSVS']['VERSION'] = env['MSVS_VERSION']
return env['MSVS_VERSION']
def get_default_arch(env):
"""Return the default arch to use for MSVS
    If no architecture was requested by the user through the MSVS_ARCH environment
    variable, select x86.
Return
------
arch: str
"""
arch = env.get('MSVS_ARCH', 'x86')
msvs = InstalledVSMap.get(env['MSVS_VERSION'])
if not msvs:
arch = 'x86'
elif not arch in msvs.get_supported_arch():
fmt = "Visual Studio version %s does not support architecture %s"
raise SCons.Errors.UserError, fmt % (env['MSVS_VERSION'], arch)
return arch
def merge_default_version(env):
version = get_default_version(env)
arch = get_default_arch(env)
msvs = get_vs_by_version(version)
if msvs is None:
return
batfilename = msvs.get_batch_file()
# XXX: I think this is broken. This will silently set a bogus tool instead
# of failing, but there is no other way with the current scons tool
# framework
if batfilename is not None:
vars = ('LIB', 'LIBPATH', 'PATH', 'INCLUDE')
msvs_list = get_installed_visual_studios()
# TODO(1.5):
#vscommonvarnames = [ vs.common_tools_var for vs in msvs_list ]
vscommonvarnames = map(lambda vs: vs.common_tools_var, msvs_list)
nenv = normalize_env(env['ENV'], vscommonvarnames + ['COMSPEC'])
output = get_output(batfilename, arch, env=nenv)
vars = parse_output(output, vars)
for k, v in vars.items():
env.PrependENVPath(k, v, delete_existing=1)
def query_versions():
"""Query the system to get available versions of VS. A version is
    considered available when a batch file is found."""
msvs_list = get_installed_visual_studios()
# TODO(1.5)
#versions = [ msvs.version for msvs in msvs_list ]
versions = map(lambda msvs: msvs.version, msvs_list)
return versions
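# Usage sketch (not part of the original module), assuming SCons 1.2's modules
# are importable under the Python 2 interpreter it requires:
#
#   from SCons.Tool.MSCommon import vs
#   if vs.detect_msvs():
#       print vs.query_versions()            # e.g. ['9.0', '8.0Exp']
#       msvs = vs.get_vs_by_version('9.0')
#       print msvs.get_batch_file()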
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
Bachaco-ve/odoo
|
refs/heads/8.0
|
addons/l10n_eu_service/wizard/wizard.py
|
242
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Odoo, Open Source Business Applications
# Copyright (C) 2015 Odoo S.A. <http://www.odoo.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.exceptions import Warning
from openerp import fields, models, api
from openerp.tools.translate import _
class l10n_eu_service(models.TransientModel):
"""Create fiscal positions for EU Service VAT"""
_name = "l10n_eu_service.wizard"
_description = __doc__
def _get_eu_res_country_group(self):
eu_group = self.env.ref("base.europe", raise_if_not_found=False)
if not eu_group:
raise Warning(_('The Europe country group cannot be found. '
'Please update the base module.'))
return eu_group
def _default_chart_id(self):
user = self.env.user
return self.env['account.account'].search(
[('company_id', '=', user.company_id.id), ('parent_id', '=', False)], limit=1)
def _default_fiscal_position_id(self):
user = self.env.user
eu_id = self._get_eu_res_country_group()
return self.env['account.fiscal.position'].search(
[('company_id', '=', user.company_id.id), ('vat_required', '=', True),
('country_group_id.id', '=', eu_id.id)], limit=1)
def _default_tax_id(self):
user = self.env.user
return self.env['account.tax'].search(
[('company_id', '=', user.company_id.id), ('type_tax_use', '=', 'sale'),
('type', '=', 'percent'), ('account_collected_id', '!=', False),
('tax_code_id', '!=', False)], limit=1, order='amount desc')
def _default_done_country_ids(self):
user = self.env.user
eu_country_group = self._get_eu_res_country_group()
return eu_country_group.country_ids - self._default_todo_country_ids() - user.company_id.country_id
def _default_todo_country_ids(self):
user = self.env.user
eu_country_group = self._get_eu_res_country_group()
eu_fiscal = self.env['account.fiscal.position'].search(
[('country_id', 'in', eu_country_group.country_ids.ids),
('vat_required', '=', False), ('auto_apply', '=', True),
('company_id', '=', user.company_id.id)])
return eu_country_group.country_ids - eu_fiscal.mapped('country_id') - user.company_id.country_id
chart_id = fields.Many2one(
"account.account", string="Chart of Accounts", required=True, default=_default_chart_id)
company_id = fields.Many2one(
'res.company', string='Company', required=True,
related='chart_id.company_id', readonly=True)
fiscal_position_id = fields.Many2one(
'account.fiscal.position', string='Fiscal Position', default=_default_fiscal_position_id,
help="Optional fiscal position to use as template for general account mapping. "
"Should usually be your current Intra-EU B2B fiscal position. "
"If not set, no general account mapping will be configured for EU fiscal positions.")
tax_id = fields.Many2one(
'account.tax', string='Service VAT', required=True, default=_default_tax_id,
help="Select your current VAT tax for services. This is the tax that will be mapped "
"to the corresponding VAT tax in each EU country selected below.")
account_collected_id = fields.Many2one(
"account.account", string="Tax Collection Account",
help="Optional account to use for collecting tax amounts when selling services in "
"each EU country selected below. If not set, the current collecting account of "
"your Service VAT will be used.")
done_country_ids = fields.Many2many(
'res.country', 'l10n_eu_service_country_rel_done', default=_default_done_country_ids,
string='Already Supported')
todo_country_ids = fields.Many2many(
'res.country', 'l10n_eu_service_country_rel_todo', default=_default_todo_country_ids,
string='EU Customers From', required=True)
@api.multi
def generate_eu_service(self):
imd = self.env['ir.model.data']
tax_code = self.env['account.tax.code']
tax_rate = self.env["l10n_eu_service.service_tax_rate"]
account_tax = self.env['account.tax']
fpos = self.env['account.fiscal.position']
chart_xid = 'l10n_eu_service.tax_chart_service_eu_company_%s' % self.company_id.name
chart = self.env.ref(chart_xid, raise_if_not_found=False)
if not chart:
vals = {
'name': _("EU MOSS VAT Chart - %(company)s") % {'company': self.company_id.name},
'company_id': self.company_id.id,
'parent_id': False
}
chart_id = tax_code.create(vals).id
vals_data = {
'name': 'tax_chart_service_eu_company_%s'%(self.company_id.name),
'model': 'account.tax.code',
'module': 'l10n_eu_service',
'res_id': chart_id,
'noupdate': True, # Don't drop it when module is updated
}
imd.create(vals_data)
else:
chart_id = chart.id
for country in self.todo_country_ids:
format_params = {'country_name': country.name}
tx_base_code_data = {
'name': _("Base - VAT for EU Services to %(country_name)s") % format_params,
'code': "BASE-EU-VAT-%s" % country.code,
'parent_id': chart_id,
}
tax_name = _("VAT for EU Services to %(country_name)s") % format_params
tx_code_data = {
'name': tax_name,
'code': "EU-VAT-%s" % country.code,
'parent_id': chart_id,
}
tx_base_code = tax_code.create(tx_base_code_data)
tx_code = tax_code.create(tx_code_data)
#create a new tax based on the selected service tax
data_tax = {
'name': tax_name,
'amount': tax_rate.search([('country_id', '=', country.id)]).rate,
'account_collected_id': self.account_collected_id.id or self.tax_id.account_collected_id.id,
'account_paid_id': self.account_collected_id.id or self.tax_id.account_collected_id.id,
'type_tax_use': 'sale',
'base_code_id': tx_base_code.id,
'ref_base_code_id': tx_base_code.id,
'tax_code_id': tx_code.id,
'ref_tax_code_id': tx_code.id,
'ref_base_sign': -1,
'ref_tax_sign': -1,
'description': "EU-VAT-%s-S" % country.code,
'sequence': 1000,
}
tax = account_tax.create(data_tax)
if self.fiscal_position_id:
account_ids = [(6, 0, self.fiscal_position_id.account_ids.ids)]
else:
account_ids = False
#create a fiscal position for the country
fiscal_pos_name = _("Intra-EU B2C in %(country_name)s") % {'country_name': country.name}
fiscal_pos_name += " (EU-VAT-%s)" % country.code
data_fiscal = {
'name': fiscal_pos_name,
'company_id': self.chart_id.company_id.id,
'vat_required': False,
'auto_apply': True,
'country_id': country.id,
'account_ids': account_ids,
'tax_ids': [(0, 0, {'tax_src_id': self.tax_id.id, 'tax_dest_id': tax.id})],
}
fpos.create(data_fiscal)
return {'type': 'ir.actions.act_window_close'}
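# Invocation sketch (not part of the original module): the wizard is normally
# launched from its menu action, but programmatically the same flow would look
# roughly like this, relying on the defaults computed by the _default_* methods:
#
#   wizard = env['l10n_eu_service.wizard'].create({})
#   wizard.generate_eu_service()
#
# 'env' stands for any Odoo 8.0 environment whose company already has a chart
# of accounts and a sale VAT configured; it is only an illustration.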
|
ludmilamarian/invenio
|
refs/heads/master
|
invenio/ext/admin/__init__.py
|
3
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Flask-Admin support in Invenio
------------------------------
Please see http://flask-admin.readthedocs.org/en/latest/quickstart/ prior to
reading this documentation, to understand how Flask-Admin works.
Flask admin allows you to easily create web administration interfaces for your
SQLAlchemy models. This extension takes care of using Blueprint as base class
for the admin views.
By default this extension will look for invenio.<name>_admin modules and call
the method register_admin(app, admin) in each module to allow it to register its
administration views.
By default all views are restricted to super users only. This can be changed via
the acc_<action>_action class variables.
Usage example - create a file called <module>_admin.py::
from invenio.ext.admin import InvenioModelView
from invenio.ext.sqlalchemy import db
from invenio.<module>_models import MyModel
class MyModelAdmin(InvenioModelView):
acc_edit_action = 'cfgmymodel'
_can_create = False
_can_edit = True
_can_delete = False
        # ... Flask-Admin options ...
# column_list = ( ... )
def __init__(self, model, session, **kwargs):
super(MyModelAdmin, self).__init__(model, session, **kwargs)
def register_admin(app, admin):
admin.add_view(MyModelAdmin(MyModel, db.session, name='My model',
category="My Category"))
Admin UI skins
~~~~~~~~~~~~~~
AdminLTE provides several different skins, please see
https://almsaeedstudio.com/themes/AdminLTE/documentation/index.html#layout.
A global variable `ADMIN_UI_SKIN` is defined and is set to `skin-blue` as
default. To change the skin, just edit the value of the variable to one of
the provided skins.
"""
from __future__ import absolute_import
from flask_admin import Admin
from flask_registry import ModuleAutoDiscoveryRegistry
from .views import AdminIndexView
class AdminDiscoveryRegistry(ModuleAutoDiscoveryRegistry):
"""Utility method."""
setup_func_name = 'register_admin'
def __init__(self, *args, **kwargs):
self.admin = kwargs.pop('admin', None)
super(AdminDiscoveryRegistry, self).__init__(*args, **kwargs)
def register(self, module, *args, **kwargs):
super(AdminDiscoveryRegistry, self).register(
module, self.app, self.admin, *args, **kwargs
)
def setup_app(app):
"""Register all administration views with the Flask application."""
app.config.setdefault("ADMIN_NAME", "Invenio")
app.config.setdefault("ADMIN_UI_SKIN", "skin-blue")
# Initialize app
admin = Admin(
name=app.config['ADMIN_NAME'],
index_view=AdminIndexView(menu_icon_type='fa', menu_icon_value='fa-home'),
base_template="admin_base.html",
template_mode='bootstrap3'
)
# TODO remove when 1.2.1 is released
if not hasattr(admin, 'category_icon_classes'):
admin.category_icon_classes = {}
admin.init_app(app)
# Create registry and run discovery
app.extensions['registry']['admin'] = AdminDiscoveryRegistry(
'admin', app=app, with_setup=True, admin=admin
)
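# Wiring sketch (not part of the original module): in Invenio this function is
# called by the application factory, but a minimal manual call would look like:
#
#   from flask import Flask
#   app = Flask(__name__)
#   # ... the 'registry' extension must already be initialized on the app ...
#   setup_app(app)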
|
chuchiperriman/cloud-services-notifications
|
refs/heads/master
|
src/cloudsn/core/indicator.py
|
1
|
import os
from cloudsn.core import config, utils, notification
from cloudsn import logger
class Indicator:
def get_name(self):
return None
def set_active(self, active):
pass
def create_indicator(self, acc):
pass
def update_account(self, acc):
pass
def remove_indicator(self, acc):
pass
def update_error(self, acc):
pass
class IndicatorManager():
__default = None
def __init__(self):
if IndicatorManager.__default:
raise IndicatorManager.__default
self.indicator= None
self.indicators = {}
from cloudsn.ui.indicators import statusicon
indi_statusicon = statusicon.StatusIconIndicator()
self.indicators[indi_statusicon.get_name()] = indi_statusicon
indi_indicator = None
try:
from cloudsn.ui.indicators import indicatorapplet
indi_indicator = indicatorapplet.IndicatorApplet()
self.indicators[indi_indicator.get_name()] = indi_indicator
except Exception,e:
logger.exception("The indicator applet provider cannot be loaded: %s", e)
indi_messagingmenu = None
try:
from cloudsn.ui.indicators import messagingmenu
indi_messagingmenu = messagingmenu.IndicatorApplet()
self.indicators[indi_messagingmenu.get_name()] = indi_messagingmenu
except Exception,e:
logger.exception("The message menu applet provider cannot be loaded: %s", e)
self.config = config.SettingsController.get_instance()
indicator_conf = self.config.get_prefs()["indicator"]
if indicator_conf:
for name in self.indicators:
if name == indicator_conf:
self.indicator = self.indicators[name]
break
if not self.indicator:
logger.error("The indicator named %s is configured but it cannot be found" % (indicator_conf))
notification.notify (_("Indicator error"),
_("The indicator named %s is configured but it cannot be found") % (indicator_conf),
utils.get_error_pixbuf())
if not self.indicator:
if "DESKTOP_SESSION" in os.environ and os.environ["DESKTOP_SESSION"] == 'ubuntu':
indi_fin = indi_messagingmenu if indi_messagingmenu else indi_indicator
if not indi_fin:
notification.notify (_("Indicator error"),
_("The indicator for ubuntu cannot be loaded "),
utils.get_error_pixbuf())
                    raise Exception(_("The indicator for ubuntu cannot be loaded"))
self.indicator = indi_fin
else:
self.indicator = indi_statusicon
self.indicator.set_active(True)
@staticmethod
def get_instance():
if not IndicatorManager.__default:
IndicatorManager.__default = IndicatorManager()
return IndicatorManager.__default
def get_indicator(self):
return self.indicator
def get_indicators(self):
return self.indicators.values()
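# Usage sketch (not part of the original module):
#
#   manager = IndicatorManager.get_instance()
#   indicator = manager.get_indicator()
#   indicator.create_indicator(account)   # 'account' is a hypothetical account object
#   indicator.update_account(account)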
|
cloudera/hue
|
refs/heads/master
|
desktop/core/ext-py/urllib3-1.25.8/src/urllib3/util/retry.py
|
28
|
from __future__ import absolute_import
import time
import logging
from collections import namedtuple
from itertools import takewhile
import email
import re
from ..exceptions import (
ConnectTimeoutError,
MaxRetryError,
ProtocolError,
ReadTimeoutError,
ResponseError,
InvalidHeader,
)
from ..packages import six
log = logging.getLogger(__name__)
# Data structure for representing the metadata of requests that result in a retry.
RequestHistory = namedtuple(
"RequestHistory", ["method", "url", "error", "status", "redirect_location"]
)
class Retry(object):
""" Retry configuration.
Each retry attempt will create a new Retry object with updated values, so
they can be safely reused.
Retries can be defined as a default for a pool::
retries = Retry(connect=5, read=2, redirect=5)
http = PoolManager(retries=retries)
response = http.request('GET', 'http://example.com/')
Or per-request (which overrides the default for the pool)::
response = http.request('GET', 'http://example.com/', retries=Retry(10))
Retries can be disabled by passing ``False``::
response = http.request('GET', 'http://example.com/', retries=False)
Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
retries are disabled, in which case the causing exception will be raised.
:param int total:
Total number of retries to allow. Takes precedence over other counts.
Set to ``None`` to remove this constraint and fall back on other
counts. It's a good idea to set this to some sensibly-high value to
account for unexpected edge cases and avoid infinite retry loops.
Set to ``0`` to fail on the first retry.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param int connect:
How many connection-related errors to retry on.
These are errors raised before the request is sent to the remote server,
which we assume has not triggered the server to process the request.
Set to ``0`` to fail on the first retry of this type.
:param int read:
How many times to retry on read errors.
These errors are raised after the request was sent to the server, so the
request may have side-effects.
Set to ``0`` to fail on the first retry of this type.
:param int redirect:
How many redirects to perform. Limit this to avoid infinite redirect
loops.
        A redirect is an HTTP response with a status code 301, 302, 303, 307 or
308.
Set to ``0`` to fail on the first retry of this type.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param int status:
How many times to retry on bad status codes.
These are retries made on responses, where status code matches
``status_forcelist``.
Set to ``0`` to fail on the first retry of this type.
:param iterable method_whitelist:
Set of uppercased HTTP method verbs that we should retry on.
By default, we only retry on methods which are considered to be
idempotent (multiple requests with the same parameters end with the
same state). See :attr:`Retry.DEFAULT_METHOD_WHITELIST`.
Set to a ``False`` value to retry on any verb.
:param iterable status_forcelist:
A set of integer HTTP status codes that we should force a retry on.
A retry is initiated if the request method is in ``method_whitelist``
and the response status code is in ``status_forcelist``.
By default, this is disabled with ``None``.
:param float backoff_factor:
A backoff factor to apply between attempts after the second try
(most errors are resolved immediately by a second try without a
delay). urllib3 will sleep for::
{backoff factor} * (2 ** ({number of total retries} - 1))
seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer
than :attr:`Retry.BACKOFF_MAX`.
By default, backoff is disabled (set to 0).
:param bool raise_on_redirect: Whether, if the number of redirects is
exhausted, to raise a MaxRetryError, or to return a response with a
response code in the 3xx range.
:param bool raise_on_status: Similar meaning to ``raise_on_redirect``:
whether we should raise an exception, or return a response,
if status falls in ``status_forcelist`` range and retries have
been exhausted.
:param tuple history: The history of the request encountered during
each call to :meth:`~Retry.increment`. The list is in the order
the requests occurred. Each list item is of class :class:`RequestHistory`.
:param bool respect_retry_after_header:
Whether to respect Retry-After header on status codes defined as
:attr:`Retry.RETRY_AFTER_STATUS_CODES` or not.
:param iterable remove_headers_on_redirect:
Sequence of headers to remove from the request when a response
indicating a redirect is returned before firing off the redirected
request.
"""
DEFAULT_METHOD_WHITELIST = frozenset(
["HEAD", "GET", "PUT", "DELETE", "OPTIONS", "TRACE"]
)
RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])
DEFAULT_REDIRECT_HEADERS_BLACKLIST = frozenset(["Authorization"])
#: Maximum backoff time.
BACKOFF_MAX = 120
def __init__(
self,
total=10,
connect=None,
read=None,
redirect=None,
status=None,
method_whitelist=DEFAULT_METHOD_WHITELIST,
status_forcelist=None,
backoff_factor=0,
raise_on_redirect=True,
raise_on_status=True,
history=None,
respect_retry_after_header=True,
remove_headers_on_redirect=DEFAULT_REDIRECT_HEADERS_BLACKLIST,
):
self.total = total
self.connect = connect
self.read = read
self.status = status
if redirect is False or total is False:
redirect = 0
raise_on_redirect = False
self.redirect = redirect
self.status_forcelist = status_forcelist or set()
self.method_whitelist = method_whitelist
self.backoff_factor = backoff_factor
self.raise_on_redirect = raise_on_redirect
self.raise_on_status = raise_on_status
self.history = history or tuple()
self.respect_retry_after_header = respect_retry_after_header
self.remove_headers_on_redirect = frozenset(
[h.lower() for h in remove_headers_on_redirect]
)
def new(self, **kw):
params = dict(
total=self.total,
connect=self.connect,
read=self.read,
redirect=self.redirect,
status=self.status,
method_whitelist=self.method_whitelist,
status_forcelist=self.status_forcelist,
backoff_factor=self.backoff_factor,
raise_on_redirect=self.raise_on_redirect,
raise_on_status=self.raise_on_status,
history=self.history,
remove_headers_on_redirect=self.remove_headers_on_redirect,
respect_retry_after_header=self.respect_retry_after_header,
)
params.update(kw)
return type(self)(**params)
@classmethod
def from_int(cls, retries, redirect=True, default=None):
""" Backwards-compatibility for the old retries format."""
if retries is None:
retries = default if default is not None else cls.DEFAULT
if isinstance(retries, Retry):
return retries
redirect = bool(redirect) and None
new_retries = cls(retries, redirect=redirect)
log.debug("Converted retries value: %r -> %r", retries, new_retries)
return new_retries
def get_backoff_time(self):
""" Formula for computing the current backoff
:rtype: float
"""
# We want to consider only the last consecutive errors sequence (Ignore redirects).
consecutive_errors_len = len(
list(
takewhile(lambda x: x.redirect_location is None, reversed(self.history))
)
)
if consecutive_errors_len <= 1:
return 0
backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1))
return min(self.BACKOFF_MAX, backoff_value)
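    # Worked example (illustrative): with backoff_factor=0.5 and a history of
    # three consecutive non-redirect errors, the formula above yields
    # 0.5 * 2 ** (3 - 1) = 2.0 seconds, capped at BACKOFF_MAX (120 seconds).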
def parse_retry_after(self, retry_after):
# Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4
if re.match(r"^\s*[0-9]+\s*$", retry_after):
seconds = int(retry_after)
else:
retry_date_tuple = email.utils.parsedate(retry_after)
if retry_date_tuple is None:
raise InvalidHeader("Invalid Retry-After header: %s" % retry_after)
retry_date = time.mktime(retry_date_tuple)
seconds = retry_date - time.time()
if seconds < 0:
seconds = 0
return seconds
def get_retry_after(self, response):
""" Get the value of Retry-After in seconds. """
retry_after = response.getheader("Retry-After")
if retry_after is None:
return None
return self.parse_retry_after(retry_after)
def sleep_for_retry(self, response=None):
retry_after = self.get_retry_after(response)
if retry_after:
time.sleep(retry_after)
return True
return False
def _sleep_backoff(self):
backoff = self.get_backoff_time()
if backoff <= 0:
return
time.sleep(backoff)
def sleep(self, response=None):
""" Sleep between retry attempts.
This method will respect a server's ``Retry-After`` response header
and sleep the duration of the time requested. If that is not present, it
will use an exponential backoff. By default, the backoff factor is 0 and
this method will return immediately.
"""
if self.respect_retry_after_header and response:
slept = self.sleep_for_retry(response)
if slept:
return
self._sleep_backoff()
def _is_connection_error(self, err):
""" Errors when we're fairly sure that the server did not receive the
request, so it should be safe to retry.
"""
return isinstance(err, ConnectTimeoutError)
def _is_read_error(self, err):
""" Errors that occur after the request has been started, so we should
assume that the server began processing it.
"""
return isinstance(err, (ReadTimeoutError, ProtocolError))
def _is_method_retryable(self, method):
""" Checks if a given HTTP method should be retried upon, depending if
it is included on the method whitelist.
"""
if self.method_whitelist and method.upper() not in self.method_whitelist:
return False
return True
def is_retry(self, method, status_code, has_retry_after=False):
""" Is this method/status code retryable? (Based on whitelists and control
variables such as the number of total retries to allow, whether to
respect the Retry-After header, whether this header is present, and
whether the returned status code is on the list of status codes to
be retried upon on the presence of the aforementioned header)
"""
if not self._is_method_retryable(method):
return False
if self.status_forcelist and status_code in self.status_forcelist:
return True
return (
self.total
and self.respect_retry_after_header
and has_retry_after
and (status_code in self.RETRY_AFTER_STATUS_CODES)
)
def is_exhausted(self):
""" Are we out of retries? """
retry_counts = (self.total, self.connect, self.read, self.redirect, self.status)
retry_counts = list(filter(None, retry_counts))
if not retry_counts:
return False
return min(retry_counts) < 0
def increment(
self,
method=None,
url=None,
response=None,
error=None,
_pool=None,
_stacktrace=None,
):
""" Return a new Retry object with incremented retry counters.
:param response: A response object, or None, if the server did not
return a response.
:type response: :class:`~urllib3.response.HTTPResponse`
:param Exception error: An error encountered during the request, or
None if the response was received successfully.
:return: A new ``Retry`` object.
"""
if self.total is False and error:
# Disabled, indicate to re-raise the error.
raise six.reraise(type(error), error, _stacktrace)
total = self.total
if total is not None:
total -= 1
connect = self.connect
read = self.read
redirect = self.redirect
status_count = self.status
cause = "unknown"
status = None
redirect_location = None
if error and self._is_connection_error(error):
# Connect retry?
if connect is False:
raise six.reraise(type(error), error, _stacktrace)
elif connect is not None:
connect -= 1
elif error and self._is_read_error(error):
# Read retry?
if read is False or not self._is_method_retryable(method):
raise six.reraise(type(error), error, _stacktrace)
elif read is not None:
read -= 1
elif response and response.get_redirect_location():
# Redirect retry?
if redirect is not None:
redirect -= 1
cause = "too many redirects"
redirect_location = response.get_redirect_location()
status = response.status
else:
# Incrementing because of a server error like a 500 in
            # status_forcelist and the given method is in the whitelist
cause = ResponseError.GENERIC_ERROR
if response and response.status:
if status_count is not None:
status_count -= 1
cause = ResponseError.SPECIFIC_ERROR.format(status_code=response.status)
status = response.status
history = self.history + (
RequestHistory(method, url, error, status, redirect_location),
)
new_retry = self.new(
total=total,
connect=connect,
read=read,
redirect=redirect,
status=status_count,
history=history,
)
if new_retry.is_exhausted():
raise MaxRetryError(_pool, url, error or ResponseError(cause))
log.debug("Incremented Retry for (url='%s'): %r", url, new_retry)
return new_retry
def __repr__(self):
return (
"{cls.__name__}(total={self.total}, connect={self.connect}, "
"read={self.read}, redirect={self.redirect}, status={self.status})"
).format(cls=type(self), self=self)
# For backwards compatibility (equivalent to pre-v1.9):
Retry.DEFAULT = Retry(3)
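# Usage sketch (not part of upstream urllib3), mirroring the class docstring:
#
#   from urllib3 import PoolManager
#   from urllib3.util.retry import Retry
#
#   retries = Retry(total=5, backoff_factor=0.2, status_forcelist=[502, 503])
#   http = PoolManager(retries=retries)
#   response = http.request('GET', 'http://example.com/')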
|
schleichdi2/OPENNFR-6.0-CORE
|
refs/heads/master
|
opennfr-openembedded-core/scripts/lib/recipetool/append.py
|
2
|
# Recipe creation tool - append plugin
#
# Copyright (C) 2015 Intel Corporation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import os
import argparse
import glob
import fnmatch
import re
import subprocess
import logging
import stat
import shutil
import scriptutils
import errno
from collections import defaultdict
logger = logging.getLogger('recipetool')
tinfoil = None
def tinfoil_init(instance):
global tinfoil
tinfoil = instance
# FIXME guessing when we don't have pkgdata?
# FIXME mode to create patch rather than directly substitute
class InvalidTargetFileError(Exception):
pass
def find_target_file(targetpath, d, pkglist=None):
"""Find the recipe installing the specified target path, optionally limited to a select list of packages"""
import json
pkgdata_dir = d.getVar('PKGDATA_DIR')
# The mix between /etc and ${sysconfdir} here may look odd, but it is just
# being consistent with usage elsewhere
invalidtargets = {'${sysconfdir}/version': '${sysconfdir}/version is written out at image creation time',
'/etc/timestamp': '/etc/timestamp is written out at image creation time',
'/dev/*': '/dev is handled by udev (or equivalent) and the kernel (devtmpfs)',
'/etc/passwd': '/etc/passwd should be managed through the useradd and extrausers classes',
'/etc/group': '/etc/group should be managed through the useradd and extrausers classes',
'/etc/shadow': '/etc/shadow should be managed through the useradd and extrausers classes',
'/etc/gshadow': '/etc/gshadow should be managed through the useradd and extrausers classes',
'${sysconfdir}/hostname': '${sysconfdir}/hostname contents should be set by setting hostname_pn-base-files = "value" in configuration',}
for pthspec, message in invalidtargets.items():
if fnmatch.fnmatchcase(targetpath, d.expand(pthspec)):
raise InvalidTargetFileError(d.expand(message))
targetpath_re = re.compile(r'\s+(\$D)?%s(\s|$)' % targetpath)
recipes = defaultdict(list)
for root, dirs, files in os.walk(os.path.join(pkgdata_dir, 'runtime')):
if pkglist:
filelist = pkglist
else:
filelist = files
for fn in filelist:
pkgdatafile = os.path.join(root, fn)
if pkglist and not os.path.exists(pkgdatafile):
continue
with open(pkgdatafile, 'r') as f:
pn = ''
# This does assume that PN comes before other values, but that's a fairly safe assumption
for line in f:
if line.startswith('PN:'):
pn = line.split(':', 1)[1].strip()
elif line.startswith('FILES_INFO:'):
val = line.split(':', 1)[1].strip()
dictval = json.loads(val)
for fullpth in dictval.keys():
if fnmatch.fnmatchcase(fullpth, targetpath):
recipes[targetpath].append(pn)
elif line.startswith('pkg_preinst_') or line.startswith('pkg_postinst_'):
scriptval = line.split(':', 1)[1].strip().encode('utf-8').decode('unicode_escape')
if 'update-alternatives --install %s ' % targetpath in scriptval:
recipes[targetpath].append('?%s' % pn)
elif targetpath_re.search(scriptval):
recipes[targetpath].append('!%s' % pn)
return recipes
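# Editor's note (illustrative values, not from the original tool): the mapping
# returned above is keyed by target path, with recipe names prefixed to flag
# uncertainty, e.g.:
#   {'/usr/bin/foo': ['foo', '?busybox', '!base-files']}
# A plain name installs the file directly, a '?' prefix means the file is
# offered via update-alternatives, and a '!' prefix means a pre/postinstall
# script writes it out.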
def _parse_recipe(pn, tinfoil):
try:
rd = tinfoil.parse_recipe(pn)
except bb.providers.NoProvider as e:
logger.error(str(e))
return None
return rd
def determine_file_source(targetpath, rd):
"""Assuming we know a file came from a specific recipe, figure out exactly where it came from"""
import oe.recipeutils
# See if it's in do_install for the recipe
workdir = rd.getVar('WORKDIR')
src_uri = rd.getVar('SRC_URI')
srcfile = ''
modpatches = []
elements = check_do_install(rd, targetpath)
if elements:
logger.debug('do_install line:\n%s' % ' '.join(elements))
srcpath = get_source_path(elements)
logger.debug('source path: %s' % srcpath)
if not srcpath.startswith('/'):
# Handle non-absolute path
srcpath = os.path.abspath(os.path.join(rd.getVarFlag('do_install', 'dirs').split()[-1], srcpath))
if srcpath.startswith(workdir):
# OK, now we have the source file name, look for it in SRC_URI
workdirfile = os.path.relpath(srcpath, workdir)
# FIXME this is where we ought to have some code in the fetcher, because this is naive
for item in src_uri.split():
localpath = bb.fetch2.localpath(item, rd)
# Source path specified in do_install might be a glob
if fnmatch.fnmatch(os.path.basename(localpath), workdirfile):
srcfile = 'file://%s' % localpath
elif '/' in workdirfile:
if item == 'file://%s' % workdirfile:
srcfile = 'file://%s' % localpath
# Check patches
srcpatches = []
patchedfiles = oe.recipeutils.get_recipe_patched_files(rd)
for patch, filelist in patchedfiles.items():
for fileitem in filelist:
if fileitem[0] == srcpath:
srcpatches.append((patch, fileitem[1]))
if srcpatches:
addpatch = None
for patch in srcpatches:
if patch[1] == 'A':
addpatch = patch[0]
else:
modpatches.append(patch[0])
if addpatch:
srcfile = 'patch://%s' % addpatch
return (srcfile, elements, modpatches)
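# Editor's note (illustrative, not from the original source): the first element
# of the returned tuple is a pseudo-URL - 'file://<local path>' when the file
# comes straight from SRC_URI, or 'patch://<patch file>' when the file is added
# by a patch - while modpatches lists any patches that modify (rather than add)
# the file.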
def get_source_path(cmdelements):
"""Find the source path specified within a command"""
command = cmdelements[0]
if command in ['install', 'cp']:
helptext = subprocess.check_output('LC_ALL=C %s --help' % command, shell=True).decode('utf-8')
argopts = ''
argopt_line_re = re.compile('^-([a-zA-Z0-9]), --[a-z-]+=')
for line in helptext.splitlines():
line = line.lstrip()
res = argopt_line_re.search(line)
if res:
argopts += res.group(1)
if not argopts:
# Fallback
if command == 'install':
argopts = 'gmoSt'
elif command == 'cp':
argopts = 't'
else:
raise Exception('No fallback arguments for command %s' % command)
skipnext = False
for elem in cmdelements[1:-1]:
if elem.startswith('-'):
if len(elem) > 1 and elem[1] in argopts:
skipnext = True
continue
if skipnext:
skipnext = False
continue
return elem
else:
        raise Exception('get_source_path: no handling for command "%s"' % command)
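# Editor's note (illustrative example, not part of the original plugin): for a
# do_install line such as
#   install -m 0644 ${WORKDIR}/defconfig ${D}${sysconfdir}/defconfig
# get_source_path(line.split()) skips '-m' and its '0644' argument and returns
# '${WORKDIR}/defconfig' as the source element.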
def get_func_deps(func, d):
"""Find the function dependencies of a shell function"""
deps = bb.codeparser.ShellParser(func, logger).parse_shell(d.getVar(func))
deps |= set((d.getVarFlag(func, "vardeps") or "").split())
funcdeps = []
for dep in deps:
if d.getVarFlag(dep, 'func'):
funcdeps.append(dep)
return funcdeps
def check_do_install(rd, targetpath):
"""Look at do_install for a command that installs/copies the specified target path"""
instpath = os.path.abspath(os.path.join(rd.getVar('D'), targetpath.lstrip('/')))
do_install = rd.getVar('do_install')
# Handle where do_install calls other functions (somewhat crudely, but good enough for this purpose)
deps = get_func_deps('do_install', rd)
for dep in deps:
do_install = do_install.replace(dep, rd.getVar(dep))
# Look backwards through do_install as we want to catch where a later line (perhaps
# from a bbappend) is writing over the top
for line in reversed(do_install.splitlines()):
line = line.strip()
if (line.startswith('install ') and ' -m' in line) or line.startswith('cp '):
elements = line.split()
destpath = os.path.abspath(elements[-1])
if destpath == instpath:
return elements
elif destpath.rstrip('/') == os.path.dirname(instpath):
# FIXME this doesn't take recursive copy into account; unsure if it's practical to do so
srcpath = get_source_path(elements)
if fnmatch.fnmatchcase(os.path.basename(instpath), os.path.basename(srcpath)):
return elements
return None
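# Editor's note (illustrative, not from the original source): for a recipe
# whose do_install runs
#   install -m 0644 ${WORKDIR}/motd ${D}${sysconfdir}/motd
# a call like check_do_install(rd, '/etc/motd') returns that command split into
# its elements (with variables already expanded by getVar above), scanning
# bottom-up so that a later bbappend override wins over earlier lines.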
def appendfile(args):
import oe.recipeutils
stdout = ''
try:
(stdout, _) = bb.process.run('LANG=C file -b %s' % args.newfile, shell=True)
if 'cannot open' in stdout:
raise bb.process.ExecutionError(stdout)
except bb.process.ExecutionError as err:
logger.debug('file command returned error: %s' % err)
stdout = ''
if stdout:
logger.debug('file command output: %s' % stdout.rstrip())
        if ('executable' in stdout and 'shell script' not in stdout) or 'shared object' in stdout:
logger.warn('This file looks like it is a binary or otherwise the output of compilation. If it is, you should consider building it properly instead of substituting a binary file directly.')
if args.recipe:
recipes = {args.targetpath: [args.recipe],}
else:
try:
recipes = find_target_file(args.targetpath, tinfoil.config_data)
except InvalidTargetFileError as e:
logger.error('%s cannot be handled by this tool: %s' % (args.targetpath, e))
return 1
if not recipes:
logger.error('Unable to find any package producing path %s - this may be because the recipe packaging it has not been built yet' % args.targetpath)
return 1
alternative_pns = []
postinst_pns = []
selectpn = None
for targetpath, pnlist in recipes.items():
for pn in pnlist:
if pn.startswith('?'):
alternative_pns.append(pn[1:])
elif pn.startswith('!'):
postinst_pns.append(pn[1:])
elif selectpn:
# hit here with multilibs
continue
else:
selectpn = pn
if not selectpn and len(alternative_pns) == 1:
selectpn = alternative_pns[0]
logger.error('File %s is an alternative possibly provided by recipe %s but seemingly no other, selecting it by default - you should double check other recipes' % (args.targetpath, selectpn))
if selectpn:
logger.debug('Selecting recipe %s for file %s' % (selectpn, args.targetpath))
if postinst_pns:
            logger.warn('%s will be modified by postinstall scripts for the following recipes:\n  %s\nThis may or may not be an issue depending on what modifications these postinstall scripts make.' % (args.targetpath, '\n  '.join(postinst_pns)))
rd = _parse_recipe(selectpn, tinfoil)
if not rd:
# Error message already shown
return 1
sourcefile, instelements, modpatches = determine_file_source(args.targetpath, rd)
sourcepath = None
if sourcefile:
sourcetype, sourcepath = sourcefile.split('://', 1)
logger.debug('Original source file is %s (%s)' % (sourcepath, sourcetype))
if sourcetype == 'patch':
logger.warn('File %s is added by the patch %s - you may need to remove or replace this patch in order to replace the file.' % (args.targetpath, sourcepath))
sourcepath = None
else:
logger.debug('Unable to determine source file, proceeding anyway')
if modpatches:
logger.warn('File %s is modified by the following patches:\n %s' % (args.targetpath, '\n '.join(modpatches)))
if instelements and sourcepath:
install = None
else:
# Auto-determine permissions
# Check destination
binpaths = '${bindir}:${sbindir}:${base_bindir}:${base_sbindir}:${libexecdir}:${sysconfdir}/init.d'
perms = '0644'
if os.path.abspath(os.path.dirname(args.targetpath)) in rd.expand(binpaths).split(':'):
# File is going into a directory normally reserved for executables, so it should be executable
perms = '0755'
else:
# Check source
st = os.stat(args.newfile)
if st.st_mode & stat.S_IXUSR:
perms = '0755'
install = {args.newfile: (args.targetpath, perms)}
oe.recipeutils.bbappend_recipe(rd, args.destlayer, {args.newfile: sourcepath}, install, wildcardver=args.wildcard_version, machine=args.machine)
return 0
else:
if alternative_pns:
logger.error('File %s is an alternative possibly provided by the following recipes:\n %s\nPlease select recipe with -r/--recipe' % (targetpath, '\n '.join(alternative_pns)))
elif postinst_pns:
logger.error('File %s may be written out in a pre/postinstall script of the following recipes:\n %s\nPlease select recipe with -r/--recipe' % (targetpath, '\n '.join(postinst_pns)))
return 3
def appendsrc(args, files, rd, extralines=None):
import oe.recipeutils
srcdir = rd.getVar('S')
workdir = rd.getVar('WORKDIR')
import bb.fetch
simplified = {}
src_uri = rd.getVar('SRC_URI').split()
for uri in src_uri:
if uri.endswith(';'):
uri = uri[:-1]
simple_uri = bb.fetch.URI(uri)
simple_uri.params = {}
simplified[str(simple_uri)] = uri
copyfiles = {}
extralines = extralines or []
for newfile, srcfile in files.items():
src_destdir = os.path.dirname(srcfile)
if not args.use_workdir:
if rd.getVar('S') == rd.getVar('STAGING_KERNEL_DIR'):
srcdir = os.path.join(workdir, 'git')
if not bb.data.inherits_class('kernel-yocto', rd):
logger.warn('S == STAGING_KERNEL_DIR and non-kernel-yocto, unable to determine path to srcdir, defaulting to ${WORKDIR}/git')
src_destdir = os.path.join(os.path.relpath(srcdir, workdir), src_destdir)
src_destdir = os.path.normpath(src_destdir)
source_uri = 'file://{0}'.format(os.path.basename(srcfile))
if src_destdir and src_destdir != '.':
source_uri += ';subdir={0}'.format(src_destdir)
simple = bb.fetch.URI(source_uri)
simple.params = {}
simple_str = str(simple)
if simple_str in simplified:
existing = simplified[simple_str]
if source_uri != existing:
logger.warn('{0!r} is already in SRC_URI, with different parameters: {1!r}, not adding'.format(source_uri, existing))
else:
logger.warn('{0!r} is already in SRC_URI, not adding'.format(source_uri))
else:
extralines.append('SRC_URI += {0}'.format(source_uri))
copyfiles[newfile] = srcfile
oe.recipeutils.bbappend_recipe(rd, args.destlayer, copyfiles, None, wildcardver=args.wildcard_version, machine=args.machine, extralines=extralines)
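# Editor's note (hypothetical file name, for illustration only): adding
# 'fixup.patch' with destination 'src/fixup.patch' produces an extra line of
# roughly the form
#   SRC_URI += file://fixup.patch;subdir=<S-relative-to-WORKDIR>/src
# in the generated bbappend, unless an entry with the same base URI is already
# present in SRC_URI.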
def appendsrcfiles(parser, args):
recipedata = _parse_recipe(args.recipe, tinfoil)
if not recipedata:
parser.error('RECIPE must be a valid recipe name')
files = dict((f, os.path.join(args.destdir, os.path.basename(f)))
for f in args.files)
return appendsrc(args, files, recipedata)
def appendsrcfile(parser, args):
recipedata = _parse_recipe(args.recipe, tinfoil)
if not recipedata:
parser.error('RECIPE must be a valid recipe name')
if not args.destfile:
args.destfile = os.path.basename(args.file)
elif args.destfile.endswith('/'):
args.destfile = os.path.join(args.destfile, os.path.basename(args.file))
return appendsrc(args, {args.file: args.destfile}, recipedata)
def layer(layerpath):
if not os.path.exists(os.path.join(layerpath, 'conf', 'layer.conf')):
raise argparse.ArgumentTypeError('{0!r} must be a path to a valid layer'.format(layerpath))
return layerpath
def existing_path(filepath):
if not os.path.exists(filepath):
raise argparse.ArgumentTypeError('{0!r} must be an existing path'.format(filepath))
return filepath
def existing_file(filepath):
filepath = existing_path(filepath)
if os.path.isdir(filepath):
raise argparse.ArgumentTypeError('{0!r} must be a file, not a directory'.format(filepath))
return filepath
def destination_path(destpath):
if os.path.isabs(destpath):
raise argparse.ArgumentTypeError('{0!r} must be a relative path, not absolute'.format(destpath))
return destpath
def target_path(targetpath):
if not os.path.isabs(targetpath):
raise argparse.ArgumentTypeError('{0!r} must be an absolute path, not relative'.format(targetpath))
return targetpath
def register_commands(subparsers):
common = argparse.ArgumentParser(add_help=False)
common.add_argument('-m', '--machine', help='Make bbappend changes specific to a machine only', metavar='MACHINE')
common.add_argument('-w', '--wildcard-version', help='Use wildcard to make the bbappend apply to any recipe version', action='store_true')
common.add_argument('destlayer', metavar='DESTLAYER', help='Base directory of the destination layer to write the bbappend to', type=layer)
parser_appendfile = subparsers.add_parser('appendfile',
parents=[common],
help='Create/update a bbappend to replace a target file',
description='Creates a bbappend (or updates an existing one) to replace the specified file that appears in the target system, determining the recipe that packages the file and the required path and name for the bbappend automatically. Note that the ability to determine the recipe packaging a particular file depends upon the recipe\'s do_packagedata task having already run prior to running this command (which it will have when the recipe has been built successfully, which in turn will have happened if one or more of the recipe\'s packages is included in an image that has been built successfully).')
parser_appendfile.add_argument('targetpath', help='Path to the file to be replaced (as it would appear within the target image, e.g. /etc/motd)', type=target_path)
parser_appendfile.add_argument('newfile', help='Custom file to replace the target file with', type=existing_file)
parser_appendfile.add_argument('-r', '--recipe', help='Override recipe to apply to (default is to find which recipe already packages the file)')
parser_appendfile.set_defaults(func=appendfile, parserecipes=True)
common_src = argparse.ArgumentParser(add_help=False, parents=[common])
common_src.add_argument('-W', '--workdir', help='Unpack file into WORKDIR rather than S', dest='use_workdir', action='store_true')
common_src.add_argument('recipe', metavar='RECIPE', help='Override recipe to apply to')
parser = subparsers.add_parser('appendsrcfiles',
parents=[common_src],
help='Create/update a bbappend to add or replace source files',
                                   description='Creates a bbappend (or updates an existing one) to add or replace the specified files in the recipe sources, either those in WORKDIR or those in the source tree. This command lets you specify multiple files with a destination directory, so you cannot specify the destination filename. See the `appendsrcfile` command for the other behavior.')
parser.add_argument('-D', '--destdir', help='Destination directory (relative to S or WORKDIR, defaults to ".")', default='', type=destination_path)
parser.add_argument('files', nargs='+', metavar='FILE', help='File(s) to be added to the recipe sources (WORKDIR or S)', type=existing_path)
parser.set_defaults(func=lambda a: appendsrcfiles(parser, a), parserecipes=True)
parser = subparsers.add_parser('appendsrcfile',
parents=[common_src],
help='Create/update a bbappend to add or replace a source file',
                                   description='Creates a bbappend (or updates an existing one) to add or replace the specified file in the recipe sources, either those in WORKDIR or those in the source tree. This command lets you specify the destination filename, not just the destination directory, but only works for one file. See the `appendsrcfiles` command for the other behavior.')
parser.add_argument('file', metavar='FILE', help='File to be added to the recipe sources (WORKDIR or S)', type=existing_path)
parser.add_argument('destfile', metavar='DESTFILE', nargs='?', help='Destination path (relative to S or WORKDIR, optional)', type=destination_path)
parser.set_defaults(func=lambda a: appendsrcfile(parser, a), parserecipes=True)
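# Editor's usage sketch (layer paths, recipe and file names below are made up):
#   recipetool appendfile     meta-custom /etc/motd ./motd
#   recipetool appendsrcfile  meta-custom bash ./0001-fix-prompt.patch
#   recipetool appendsrcfiles -D src meta-custom bash ./a.c ./b.c
# Each command writes (or updates) a bbappend under the given destination layer.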
|
misgeatgit/opencog
|
refs/heads/master
|
opencog/python/monitor_changes.py
|
7
|
from opencog.atomspace import AtomSpace, types, Atom, Handle, TruthValue, types as t
import opencog.cogserver
import random
from time import sleep
def monitor_changes(atomspace):
tv_delta = 0.1
interval = 5 + 5*random.random() # seconds
t = types
# Get latest and previous time nodes with large enough interval
times = atomspace.get_atoms_by_type(t.TimeNode, subtype = False)
times = [f for f in times if f.name != "0"] # Related to a bug in the Psi Modulator system
times = sorted(times, key= lambda t: int(t.name) )
latest_time = times[-1]
previous_time = latest_time
for time in reversed(times):
if int(latest_time.name) - int(time.name) >= interval*100:
previous_time = time
break
# print "TimeNodes: "
# print [str(latest_time), str(previous_time)]
# Detect changes
at_times_latest = latest_time.incoming_by_type(t.AtTimeLink, subtype = False)
at_times_previous = previous_time.incoming_by_type(t.AtTimeLink, subtype = False)
changes_with_tv = []
changes_with_arg = []
for latest in at_times_latest:
for previous in at_times_previous:
eval_latest = latest.out[1]
eval_previous = previous.out[1]
if eval_latest.t != t.EvaluationLink or eval_previous.t != t.EvaluationLink:
continue
if eval_latest == eval_previous:
if abs( latest.tv.mean - previous.tv.mean ) >= tv_delta:
changes_with_tv.append( [latest, previous] )
changes_with_arg = list( set(at_times_latest) ^ set(at_times_previous) )
# Remove previous records of changes in the atomspace
pred_change_with_tv = atomspace.add_node(t.PredicateNode, "change_with_tv")
pred_change_with_arg = atomspace.add_node(t.PredicateNode, "change_with_arg")
pred_has_dramatic_changes = atomspace.add_node(t.PredicateNode, "has_dramatic_changes")
old_changes_with_tv = pred_change_with_tv.incoming_by_type(t.ReferenceLink, subtype = False)
for old_change_with_tv in old_changes_with_tv:
list_link = old_change_with_tv.out[1]
atomspace.remove(old_change_with_tv, recursive = False)
atomspace.remove(list_link, recursive = False)
old_changes_with_arg = pred_change_with_arg.incoming_by_type(t.ReferenceLink, subtype = False)
for old_change_with_arg in old_changes_with_arg:
list_link = old_change_with_arg.out[1]
atomspace.remove(old_change_with_arg, recursive = False)
atomspace.remove(list_link, recursive = False)
eval_has_dramatic_changes = atomspace.add_link(t.EvaluationLink, [pred_has_dramatic_changes])
atomspace.set_tv(eval_has_dramatic_changes.h, TruthValue(0, 0))
# Record the changes in the atomspace
for change_with_tv in changes_with_tv:
list_link = atomspace.add_link(t.ListLink, change_with_tv)
eval_link = atomspace.add_link(t.ReferenceLink, [pred_change_with_tv, list_link])
# print eval_link
if changes_with_arg:
list_link = atomspace.add_link(t.ListLink, changes_with_arg)
eval_link = atomspace.add_link(t.ReferenceLink, [pred_change_with_arg, list_link])
# print eval_link
if changes_with_tv or changes_with_arg:
atomspace.set_tv(eval_has_dramatic_changes.h, TruthValue(1, 1))
print "Found " + str(len(changes_with_tv)) + " changes_with_tv"
print "Found " + str(len(changes_with_arg)) + " changes_with_arg"
print eval_has_dramatic_changes
def monitor_loop(atomspace):
while True:
wait = random.randint(2,5)
sleep(wait)
monitor_changes(atomspace)
class MonitorChangesMindAgent(opencog.cogserver.MindAgent):
def __init__(self):
self.cycles = 1
self.is_running = False
def run(self,atomspace):
# print "step MonitorChangesMindAgent"
# Python thread is awkward. The code below blocks everything!
# if not self.is_running:
# self.monitor_thread = Thread(target = monitor_loop, args=(atomspace,))
# self.monitor_thread.start()
# self.is_running = True
monitor_changes(atomspace)
self.cycles += 1
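# Editor's note (summary, not part of the original agent): each cycle rewrites
# three bookkeeping structures in the AtomSpace:
#   ReferenceLink(PredicateNode "change_with_tv",  ListLink <latest/previous AtTimeLink pair>)
#   ReferenceLink(PredicateNode "change_with_arg", ListLink <changed AtTimeLinks>)
#   EvaluationLink(PredicateNode "has_dramatic_changes")  # TV (1,1) if anything changed
# so downstream agents only ever see the changes detected in the latest cycle.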
|
kjw0106/boto
|
refs/heads/master
|
tests/integration/codedeploy/__init__.py
|
586
|
# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
|