repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
brain-tec/stock-logistics-workflow | refs/heads/8.0 | stock_lock_lot/tests/test_locking.py | 5 | # -*- coding: utf-8 -*-
# © 2015 Numérigraphe
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp.addons.stock.tests.common import TestStockCommon
from openerp import exceptions
class TestLockingUnlocking(TestStockCommon):
    """Tests for locking and unlocking stock production lots.

    setUp puts 10 units of productA in stock under a test lot and records
    an outgoing picking of 5 units restricted to that lot, so each test can
    exercise reservation behaviour against the (un)locked lot.
    """

    def setUp(self):
        """Create a lot in stock, an outgoing picking and an unauthorized user."""
        super(TestLockingUnlocking, self).setUp()
        # Non-strict mode: locking rules apply only where configured.
        self.env['stock.config.settings']._write_or_create_param(
            'stock.lock.lot.strict', False)
        self.LotObj = self.env['stock.production.lot']
        # Create a lot
        self.productA.type = 'product'
        self.lot = self.LotObj.create({'name': 'Test Lot',
                                       'product_id': self.productA.id})
        # Make sure we have this lot in stock
        inventory = self.InvObj.create({'name': 'Test Lot',
                                        'filter': 'product',
                                        'product_id': self.productA.id})
        inventory.prepare_inventory()
        self.InvLineObj.create({'inventory_id': inventory.id,
                                'product_id': self.productA.id,
                                'product_uom_id': self.productA.uom_id.id,
                                'product_qty': 10,
                                'location_id': self.stock_location,
                                'prod_lot_id': self.lot.id})
        inventory.action_done()
        # Record a picking
        self.picking_out = self.PickingObj.create(
            {'picking_type_id': self.picking_type_out})
        self.MoveObj.create(
            {'name': self.productA.name,
             'product_id': self.productA.id,
             'product_uom_qty': 5,
             'product_uom': self.productA.uom_id.id,
             'picking_id': self.picking_out.id,
             'location_id': self.stock_location,
             'location_dest_id': self.customer_location,
             'restrict_lot_id': self.lot.id})
        # Make an unauthorized user (__file__ is just a convenient unique
        # string for the mandatory name/login fields)
        self.unauthorized_user = self.env["res.users"].create({
            "name": __file__,
            "login": __file__,
        })

    def test_lock(self):
        """Locking a lot must lock its quants and block reservation."""
        # Verify the button locks the lot
        self.lot.button_lock()
        # Verify unauthorized users can't lock the lot (note: the lot is
        # already locked here, so only the access check is exercised)
        with self.assertRaises(exceptions.AccessError):
            self.lot.sudo(self.unauthorized_user).button_lock()
        self.assertTrue(self.lot.locked,
                        "The lot should be locked when the button is pressed")
        self.assertTrue(all([quant.locked for quant in self.lot.quant_ids]),
                        "All the quants should be locked when lot is locked")
        # Verify the lot isn't reserved automatically
        self.picking_out.action_assign()
        for move in self.picking_out.move_lines:
            self.assertNotEqual(
                move.state, 'assigned',
                "The stock move should not be assigned: the lot is locked."
                "Reserved lots: %s" % [(q.id, q.lot_id.name)
                                       for q in move.reserved_quant_ids])

    def test_unlock(self):
        """Unlocking a lot must unlock its quants and allow reservation."""
        self.lot.button_lock()
        # Verify unauthorized users can't unlock the lot
        with self.assertRaises(exceptions.AccessError):
            self.lot.sudo(self.unauthorized_user).button_unlock()
        # Verify the button unlocks the lot when it's been locked
        self.lot.button_unlock()
        self.assertFalse(
            self.lot.locked,
            "The lot should be unlocked when the button is pressed")
        self.assertFalse(
            all([quant.locked for quant in self.lot.quant_ids]),
            "All the quants should be unlocked when lot is unlocked")
        # Verify the lot is reserved automatically
        self.picking_out.action_assign()
        for move in self.picking_out.move_lines:
            self.assertEqual(
                move.state, 'assigned',
                'The stock move should be assigned')

    def test_lock_unreserve(self):
        """Blocking a lot must unreserve all the quants"""
        # Reserve the lot
        self.picking_out.action_assign()
        # Check the lot has reservations
        domain = [('lot_id', '=', self.lot.id),
                  ('reservation_id', '!=', False)]
        self.assertTrue(self.env['stock.quant'].search_count(domain))
        # Lock and check the lot has no reservations anymore
        self.lot.button_lock()
        self.assertFalse(self.env['stock.quant'].search_count(domain))

    def test_category_locked(self):
        """New lots must be born locked when the product category requires it."""
        self.productA.categ_id.lot_default_locked = True
        lot1 = self.LotObj.create({'name': 'Lot in locked category',
                                   'product_id': self.productA.id})
        self.assertTrue(lot1.locked, 'Category demands to lock new lots')

    def test_wizard(self):
        """The lock/unlock wizard must act on the lots passed via the context."""
        wizard = self.env['wiz.lock.lot'].with_context(
            active_ids=[self.lot.id])
        wizard.action_lock_lots()
        self.assertTrue(self.lot.locked, 'Wizard failed to lock the lot')
        wizard.action_unlock_lots()
        self.assertFalse(self.lot.locked, 'Wizard failed to unlock the lot')

    def test_allow_lock(self):
        """Locked lots may still be reserved for destinations flagged allow_locked."""
        self.lot.button_lock()
        self.picking_out.location_dest_id.allow_locked = True
        self.picking_out.action_assign()
        for move in self.picking_out.move_lines:
            self.assertEqual(move.state, 'assigned')
|
antepsis/anteplahmacun | refs/heads/master | sympy/physics/quantum/boson.py | 22 | """Bosonic quantum operators."""
from sympy import Mul, Integer, exp, sqrt, conjugate
from sympy.physics.quantum import Operator
from sympy.physics.quantum import HilbertSpace, FockSpace, Ket, Bra, IdentityOperator
from sympy.functions.special.tensor_functions import KroneckerDelta
__all__ = [
'BosonOp',
'BosonFockKet',
'BosonFockBra',
'BosonCoherentKet',
'BosonCoherentBra'
]
class BosonOp(Operator):
    """A bosonic operator that satisfies [a, Dagger(a)] == 1.

    Parameters
    ==========

    name : str
        A string that labels the bosonic mode.

    annihilation : bool
        A bool that indicates if the bosonic operator is an annihilation (True,
        default value) or creation operator (False)

    Examples
    ========

    >>> from sympy.physics.quantum import Dagger, Commutator
    >>> from sympy.physics.quantum.boson import BosonOp
    >>> a = BosonOp("a")
    >>> Commutator(a, Dagger(a)).doit()
    1
    """

    @property
    def name(self):
        # The mode label (first constructor argument).
        return self.args[0]

    @property
    def is_annihilation(self):
        # Stored as Integer(0)/Integer(1) in args; coerce back to bool.
        return bool(self.args[1])

    @classmethod
    def default_args(self):
        # Default: annihilation operator for mode "a".
        return ("a", True)

    def __new__(cls, *args, **hints):
        # Accept BosonOp(name) or BosonOp(name, annihilation); normalize the
        # flag to a SymPy Integer so it survives as part of .args.
        if not len(args) in [1, 2]:
            raise ValueError('1 or 2 parameters expected, got %s' % args)
        if len(args) == 1:
            args = (args[0], Integer(1))
        if len(args) == 2:
            args = (args[0], Integer(args[1]))
        return Operator.__new__(cls, *args)

    def _eval_commutator_BosonOp(self, other, **hints):
        # Only [a^dagger, a] = -1 is returned explicitly; presumably the
        # Commutator canonicalization derives [a, a^dagger] = +1 from it by
        # antisymmetry -- confirm against sympy.physics.quantum.Commutator.
        if self.name == other.name:
            # [a^\dagger, a] = -1
            if not self.is_annihilation and other.is_annihilation:
                return Integer(-1)
        elif 'independent' in hints and hints['independent']:
            # [a, b] = 0
            return Integer(0)
        return None

    def _eval_commutator_FermionOp(self, other, **hints):
        # Bosonic operators always commute with fermionic ones.
        return Integer(0)

    def _eval_anticommutator_BosonOp(self, other, **hints):
        if 'independent' in hints and hints['independent']:
            # {a, b} = 2 * a * b, because [a, b] = 0
            return 2 * self * other
        return None

    def _eval_adjoint(self):
        # Dagger flips annihilation <-> creation.
        return BosonOp(str(self.name), not self.is_annihilation)

    def __mul__(self, other):
        # NOTE(review): the literal dimension in IdentityOperator(2) looks
        # arbitrary here -- verify whether a plain IdentityOperator() is meant.
        if other == IdentityOperator(2):
            return self
        if isinstance(other, Mul):
            # Pull the commutative factors out front and fold the
            # noncommutative ones onto self in their original order.
            args1 = tuple(arg for arg in other.args if arg.is_commutative)
            args2 = tuple(arg for arg in other.args if not arg.is_commutative)
            x = self
            for y in args2:
                x = x * y
            return Mul(*args1) * x
        return Mul(self, other)

    def _print_contents_latex(self, printer, *args):
        if self.is_annihilation:
            return r'{%s}' % str(self.name)
        else:
            return r'{{%s}^\dag}' % str(self.name)

    def _print_contents(self, printer, *args):
        if self.is_annihilation:
            return r'%s' % str(self.name)
        else:
            return r'Dagger(%s)' % str(self.name)

    def _print_contents_pretty(self, printer, *args):
        # Local import to avoid a printing dependency at module import time.
        from sympy.printing.pretty.stringpict import prettyForm
        pform = printer._print(self.args[0], *args)
        if self.is_annihilation:
            return pform
        else:
            return pform**prettyForm(u'\N{DAGGER}')
class BosonFockKet(Ket):
    """Fock state ket |n> for a single bosonic mode.

    Parameters
    ==========

    n : Number
        The Fock state number.
    """

    def __new__(cls, n):
        # Delegate label handling to the generic Ket constructor.
        return Ket.__new__(cls, n)

    @property
    def n(self):
        """The Fock state number (first label entry)."""
        labels = self.label
        return labels[0]

    @classmethod
    def dual_class(self):
        return BosonFockBra

    @classmethod
    def _eval_hilbert_space(cls, label):
        return FockSpace()

    def _eval_innerproduct_BosonFockBra(self, bra, **hints):
        # <m|n> = delta(m, n)
        return KroneckerDelta(self.n, bra.n)

    def _apply_operator_BosonOp(self, op, **options):
        # a|n> = sqrt(n)|n-1>,  a^dagger|n> = sqrt(n+1)|n+1>
        if op.is_annihilation:
            shift, amplitude = -1, sqrt(self.n)
        else:
            shift, amplitude = 1, sqrt(self.n + 1)
        return amplitude * BosonFockKet(self.n + shift)
class BosonFockBra(Bra):
    """Fock state bra <n| for a single bosonic mode.

    Parameters
    ==========

    n : Number
        The Fock state number.
    """

    def __new__(cls, n):
        # The generic Bra constructor stores n as the single label entry.
        return Bra.__new__(cls, n)

    @property
    def n(self):
        """The Fock state number (first label entry)."""
        labels = self.label
        return labels[0]

    @classmethod
    def dual_class(self):
        return BosonFockKet

    @classmethod
    def _eval_hilbert_space(cls, label):
        return FockSpace()
class BosonCoherentKet(Ket):
    """Coherent state ket |alpha> for a single bosonic mode.

    Parameters
    ==========

    alpha : Number, Symbol
        The complex amplitude of the coherent state.
    """

    def __new__(cls, alpha):
        return Ket.__new__(cls, alpha)

    @property
    def alpha(self):
        """The complex amplitude of the coherent state."""
        labels = self.label
        return labels[0]

    @classmethod
    def dual_class(self):
        return BosonCoherentBra

    @classmethod
    def _eval_hilbert_space(cls, label):
        return HilbertSpace()

    def _eval_innerproduct_BosonCoherentBra(self, bra, **hints):
        # <beta|alpha> = exp(-(|alpha|^2 + |beta|^2 - 2*conj(beta)*alpha)/2);
        # identical amplitudes give exactly 1.
        if self.alpha == bra.alpha:
            return Integer(1)
        exponent = abs(self.alpha) ** 2 + abs(bra.alpha) ** 2
        exponent = exponent - 2 * conjugate(bra.alpha) * self.alpha
        return exp(-exponent / 2)

    def _apply_operator_BosonOp(self, op, **options):
        # Coherent states are right eigenstates of the annihilation operator
        # only; creation operators are left for the framework to handle.
        return self.alpha * self if op.is_annihilation else None
class BosonCoherentBra(Bra):
    """Coherent state bra <alpha| for a single bosonic mode.

    Parameters
    ==========

    alpha : Number, Symbol
        The complex amplitude of the coherent state.
    """

    def __new__(cls, alpha):
        return Bra.__new__(cls, alpha)

    @property
    def alpha(self):
        """The complex amplitude of the coherent state."""
        labels = self.label
        return labels[0]

    @classmethod
    def dual_class(self):
        return BosonCoherentKet

    def _apply_operator_BosonOp(self, op, **options):
        # <alpha| is a left eigenstate of the creation operator only.
        return self.alpha * self if not op.is_annihilation else None
|
dkarakats/edx-platform | refs/heads/master | common/test/acceptance/tests/studio/test_studio_acid_xblock.py | 14 | """
Acceptance tests for Studio related to the acid xblock.
"""
from unittest import skip
from bok_choy.web_app_test import WebAppTest
from ...pages.studio.auto_auth import AutoAuthPage
from ...pages.studio.overview import CourseOutlinePage
from ...pages.xblock.acid import AcidView
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
@skip('Jenkins builds are getting stuck on acid_block tests')
class XBlockAcidBase(WebAppTest):
    """
    Base class for tests that verify that XBlock integration is working correctly
    """
    __test__ = False

    def setUp(self):
        """
        Create a unique identifier for the course used in this test.
        """
        # Ensure that the superclass sets up
        super(XBlockAcidBase, self).setUp()
        # Define a unique course identifier
        self.course_info = {
            'org': 'test_org',
            'number': 'course_' + self.unique_id[:5],
            'run': 'test_' + self.unique_id,
            'display_name': 'Test Course ' + self.unique_id
        }
        self.outline = CourseOutlinePage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        self.course_id = '{org}.{number}.{run}'.format(**self.course_info)
        # Subclasses must implement setup_fixtures(); it installs the course
        # fixture and is expected to set self.user for the auth page below.
        self.setup_fixtures()
        self.auth_page = AutoAuthPage(
            self.browser,
            staff=False,
            username=self.user.get('username'),
            email=self.user.get('email'),
            password=self.user.get('password')
        )
        self.auth_page.visit()

    def validate_acid_block_preview(self, acid_block):
        """
        Validate the Acid Block's preview
        """
        self.assertTrue(acid_block.init_fn_passed)
        self.assertTrue(acid_block.resource_url_passed)
        self.assertTrue(acid_block.scope_passed('user_state'))
        self.assertTrue(acid_block.scope_passed('user_state_summary'))
        self.assertTrue(acid_block.scope_passed('preferences'))
        self.assertTrue(acid_block.scope_passed('user_info'))

    def test_acid_block_preview(self):
        """
        Verify that all expected acid block tests pass in studio preview
        """
        self.outline.visit()
        subsection = self.outline.section('Test Section').subsection('Test Subsection')
        unit = subsection.expand_subsection().unit('Test Unit').go_to()
        acid_block = AcidView(self.browser, unit.xblocks[0].preview_selector)
        self.validate_acid_block_preview(acid_block)

    def test_acid_block_editor(self):
        """
        Verify that all expected acid block tests pass in studio editor
        """
        self.outline.visit()
        subsection = self.outline.section('Test Section').subsection('Test Subsection')
        unit = subsection.expand_subsection().unit('Test Unit').go_to()
        # The editor check only covers init and resource URLs; scoped-field
        # checks are exercised in the preview test above.
        acid_block = AcidView(self.browser, unit.xblocks[0].edit().editor_selector)
        self.assertTrue(acid_block.init_fn_passed)
        self.assertTrue(acid_block.resource_url_passed)
class XBlockAcidNoChildTest(XBlockAcidBase):
    """
    Tests of an AcidBlock with no children
    """
    __test__ = True

    def setup_fixtures(self):
        """Install a course containing a single childless acid block."""
        course_fix = CourseFixture(
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run'],
            self.course_info['display_name']
        )
        course_fix.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit').add_children(
                        XBlockFixtureDesc('acid', 'Acid Block')
                    )
                )
            )
        ).install()
        # Expose the fixture's auto-created user for the auth page in setUp().
        self.user = course_fix.user
@skip('Jenkins builds are getting stuck on acid_block tests')
class XBlockAcidParentBase(XBlockAcidBase):
    """
    Base class for tests that verify that parent XBlock integration is working correctly
    """
    __test__ = False

    def validate_acid_block_preview(self, acid_block):
        # Parent blocks must additionally report passing child tests.
        super(XBlockAcidParentBase, self).validate_acid_block_preview(acid_block)
        self.assertTrue(acid_block.child_tests_passed)

    def test_acid_block_preview(self):
        """
        Verify that all expected acid block tests pass in studio preview
        """
        self.outline.visit()
        subsection = self.outline.section('Test Section').subsection('Test Subsection')
        unit = subsection.expand_subsection().unit('Test Unit').go_to()
        # Unlike the base class, parent blocks are validated inside their
        # container page rather than directly on the unit page.
        container = unit.xblocks[0].go_to_container()
        acid_block = AcidView(self.browser, container.xblocks[0].preview_selector)
        self.validate_acid_block_preview(acid_block)
class XBlockAcidEmptyParentTest(XBlockAcidParentBase):
    """
    Tests of an AcidBlock with children
    """
    __test__ = True

    def setup_fixtures(self):
        """Install a course whose acid_parent block has no children."""
        course_fix = CourseFixture(
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run'],
            self.course_info['display_name']
        )
        course_fix.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit').add_children(
                        # Deliberately empty add_children(): the parent block
                        # is installed without any child blocks.
                        XBlockFixtureDesc('acid_parent', 'Acid Parent Block').add_children(
                        )
                    )
                )
            )
        ).install()
        self.user = course_fix.user
@skip('Jenkins builds are getting stuck on acid_block tests')
class XBlockAcidChildTest(XBlockAcidParentBase):
    """
    Tests of an AcidBlock with children
    """
    __test__ = True

    def setup_fixtures(self):
        """Install a course with an acid_parent block holding three children."""
        course_fix = CourseFixture(
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run'],
            self.course_info['display_name']
        )
        course_fix.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit').add_children(
                        XBlockFixtureDesc('acid_parent', 'Acid Parent Block').add_children(
                            XBlockFixtureDesc('acid', 'First Acid Child', metadata={'name': 'first'}),
                            XBlockFixtureDesc('acid', 'Second Acid Child', metadata={'name': 'second'}),
                            XBlockFixtureDesc('html', 'Html Child', data="<html>Contents</html>"),
                        )
                    )
                )
            )
        ).install()
        self.user = course_fix.user

    def test_acid_block_preview(self):
        # NOTE(review): these overrides only delegate to the inherited tests;
        # presumably they exist to pin the test names on this class -- confirm.
        super(XBlockAcidChildTest, self).test_acid_block_preview()

    def test_acid_block_editor(self):
        super(XBlockAcidChildTest, self).test_acid_block_editor()
|
omarkohl/pytest | refs/heads/master | testing/cx_freeze/install_cx_freeze.py | 182 | """
Installs cx_freeze from source, but first patching
setup.py as described here:
http://stackoverflow.com/questions/25107697/compiling-cx-freeze-under-ubuntu
"""
import glob
import tarfile
import os
import sys
import platform
import py
if __name__ == '__main__':
    # On non-Ubuntu platforms a plain pip install works; only Ubuntu needs
    # the patched source build (see the link in the module docstring).
    if 'ubuntu' not in platform.version().lower():
        print('Not Ubuntu, installing using pip. (platform.version() is %r)' %
              platform.version())
        res = os.system('pip install cx_freeze')
        if res != 0:
            sys.exit(res)
        sys.exit(0)
    # Download (but don't install) the source tarball into a scratch dir.
    rootdir = py.path.local.make_numbered_dir(prefix='cx_freeze')
    res = os.system('pip install --download %s --no-use-wheel '
                    'cx_freeze' % rootdir)
    if res != 0:
        sys.exit(res)
    packages = glob.glob('%s/*.tar.gz' % rootdir)
    assert len(packages) == 1
    tar_filename = packages[0]
    tar_file = tarfile.open(tar_filename)
    try:
        tar_file.extractall(path=str(rootdir))
    finally:
        tar_file.close()
    basename = os.path.basename(tar_filename).replace('.tar.gz', '')
    setup_py_filename = '%s/%s/setup.py' % (rootdir, basename)
    with open(setup_py_filename) as f:
        lines = f.readlines()
    # Patch setup.py: force the Py_ENABLE_SHARED branch to always run so the
    # build links correctly against Ubuntu's shared-library Python.
    line_to_patch = 'if not vars.get("Py_ENABLE_SHARED", 0):'
    for index, line in enumerate(lines):
        if line_to_patch in line:
            # Preserve the original indentation of the patched line.
            indent = line[:line.index(line_to_patch)]
            lines[index] = indent + 'if True:\n'
            print('Patched line %d' % (index + 1))
            break
    else:
        sys.exit('Could not find line in setup.py to patch!')
    with open(setup_py_filename, 'w') as f:
        f.writelines(lines)
    # Build and install from the patched source tree.
    os.chdir('%s/%s' % (rootdir, basename))
    res = os.system('python setup.py install')
    if res != 0:
        sys.exit(res)
    sys.exit(0)
|
ClodoCorp/debian-ganglia | refs/heads/master | gmond/python_modules/network/traffic1.py | 9 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import threading
import time
descriptors = list()
Desc_Skel = {}
_Worker_Thread = None
_Lock = threading.Lock() # synchronization lock
Debug = False
def dprint(f, *v):
    # Debug helper: printf-style message to stderr, emitted only when the
    # module-level Debug flag is set (Python 2 `print >>` syntax).
    if Debug:
        print >> sys.stderr, "DEBUG: "+f % v
class UpdateTrafficThread(threading.Thread):
    """Background thread that samples /proc/net/dev for a single interface
    and keeps per-second rate metrics in self.metric."""

    # NOTE(review): the missing comma makes this a plain string rather than
    # a tuple, and threading.Thread instances keep a __dict__ anyway, so
    # this declaration is effectively inert -- confirm intent.
    __slots__ = ( 'proc_file' )

    def __init__(self, params):
        threading.Thread.__init__(self)
        self.running = False        # True while run() is executing
        self.shuttingdown = False   # signal for run() to stop
        self.refresh_rate = 10      # seconds between samples
        if "refresh_rate" in params:
            self.refresh_rate = int(params["refresh_rate"])
        self.target_device = params["target_device"]
        self.metric = {}            # metric name -> latest rate value
        self.proc_file = "/proc/net/dev"
        # Column index of each counter within a /proc/net/dev line
        # (after the "iface:" prefix has been split off).
        self.stats_tab = {
            "recv_bytes" : 0,
            "recv_pkts" : 1,
            "recv_errs" : 2,
            "recv_drops" : 3,
            "trans_bytes" : 8,
            "trans_pkts" : 9,
            "trans_errs" : 10,
            "trans_drops" : 11,
        }
        self.stats = {}       # latest raw counters (+ sample "time")
        self.stats_prev = {}  # previous raw counters, for rate deltas

    def shutdown(self):
        """Ask the thread to stop and wait for it to finish."""
        self.shuttingdown = True
        if not self.running:
            return
        self.join()

    def run(self):
        # Periodically refresh the metrics under the module-wide lock.
        self.running = True
        while not self.shuttingdown:
            _Lock.acquire()
            self.update_metric()
            _Lock.release()
            time.sleep(self.refresh_rate)
        self.running = False

    def update_metric(self):
        """Read /proc/net/dev and recompute per-second rates for the target
        device from the counter deltas since the previous sample."""
        # NOTE(review): f is never closed explicitly; relies on GC.
        f = open(self.proc_file, "r")
        for l in f:
            a = l.split(":")
            dev = a[0].lstrip()
            if dev != self.target_device: continue
            dprint("%s", ">>update_metric")
            self.stats = {}
            _stats = a[1].split()
            for name, index in self.stats_tab.iteritems():
                self.stats[name+'_'+self.target_device] = int(_stats[index])
            self.stats["time"] = time.time()
            dprint("%s", self.stats)
            if "time" in self.stats_prev:
                dprint("%s: %d = %d - %d", "DO DIFF", self.stats["time"]-self.stats_prev["time"], self.stats["time"], self.stats_prev["time"])
                # Rate = counter delta divided by elapsed wall-clock time.
                d = self.stats["time"] - self.stats_prev["time"]
                for name, cur in self.stats.iteritems():
                    self.metric[name] = float(cur - self.stats_prev[name])/d
            self.stats_prev = self.stats.copy()
            break
        return

    def metric_of(self, name):
        """Return the current value for *name*, or 0 if not yet sampled."""
        val = 0
        if name in self.metric:
            _Lock.acquire()
            val = self.metric[name]
            _Lock.release()
        return val
def metric_init(params):
    """Gmond module entry point: start the sampling thread and register
    one descriptor per traffic counter for the configured device."""
    global Desc_Skel, _Worker_Thread, Debug
    print '[traffic1] Received the following parameters'
    print params
    # Skeleton descriptor; create_desc() overrides name/units/description.
    Desc_Skel = {
        'name' : 'XXX',
        'call_back' : metric_of,
        'time_max' : 60,
        'value_type' : 'float',
        'format' : '%.3f',
        'units' : 'XXX',
        'slope' : 'both',
        'description' : 'XXX',
        'groups' : 'network',
        }
    if "refresh_rate" not in params:
        params["refresh_rate"] = 10
    if "debug" in params:
        Debug = params["debug"]
        dprint("%s", "Debug mode on")
    # Fall back to the loopback interface when none was configured.
    if "target_device" not in params:
        params["target_device"] = "lo"
    target_device = params["target_device"]
    _Worker_Thread = UpdateTrafficThread(params)
    _Worker_Thread.start()
    # IP:HOSTNAME
    if "spoof_host" in params:
        Desc_Skel["spoof_host"] = params["spoof_host"]
    descriptors.append(create_desc(Desc_Skel, {
        "name" : "recv_bytes_" + target_device,
        "units" : "bytes/sec",
        "description" : "received bytes per sec",
        }))
    descriptors.append(create_desc(Desc_Skel, {
        "name" : "recv_pkts_" + target_device,
        "units" : "pkts/sec",
        "description" : "received packets per sec",
        }))
    descriptors.append(create_desc(Desc_Skel, {
        "name" : "recv_errs_" + target_device,
        "units" : "pkts/sec",
        "description" : "received error packets per sec",
        }))
    descriptors.append(create_desc(Desc_Skel, {
        "name" : "trans_bytes_" + target_device,
        "units" : "bytes/sec",
        "description" : "transmitted bytes per sec",
        }))
    descriptors.append(create_desc(Desc_Skel, {
        "name" : "trans_pkts_" + target_device,
        "units" : "pkts/sec",
        "description" : "transmitted packets per sec",
        }))
    descriptors.append(create_desc(Desc_Skel, {
        "name" : "trans_errs_" + target_device,
        "units" : "pkts/sec",
        "description" : "transmitted error packets per sec",
        }))
    return descriptors
def create_desc(skel, prop):
    """Return a copy of the skeleton descriptor *skel* overridden by *prop*.

    Neither input dict is modified. Uses dict.update() instead of the
    original Python-2-only iteritems() loop, so the helper also works on
    Python 3 (behavior is unchanged on Python 2).
    """
    d = skel.copy()
    d.update(prop)
    return d
def metric_of(name):
    """Gmond callback: fetch the current value for *name* from the worker."""
    worker = _Worker_Thread
    return worker.metric_of(name)
def metric_cleanup():
    """Gmond cleanup hook: stop the background sampling thread."""
    worker = _Worker_Thread
    worker.shutdown()
if __name__ == '__main__':
    # Stand-alone smoke test: sample eth0 and print every metric each 5s.
    try:
        params = {
            "target_device": "eth0",
            "debug" : True,
        }
        metric_init(params)
        while True:
            for d in descriptors:
                v = d['call_back'](d['name'])
                print ('value for %s is '+d['format']) % (d['name'], v)
            time.sleep(5)
    except KeyboardInterrupt:
        time.sleep(0.2)
        os._exit(1)
    except StandardError:
        # NOTE(review): StandardError exists only in Python 2; this module
        # is Python 2 throughout (print statements, iteritems).
        print sys.exc_info()[0]
        os._exit(1)
|
isislovecruft/torflow | refs/heads/master | NetworkScanners/BwAuthority/bwauthority.py | 1 | #!/usr/bin/env python
from sys import argv as s_argv
from sys import path
from sys import exit
from subprocess import Popen
path.append("../../")
from TorCtl.TorUtil import plog as plog
from TorCtl.TorUtil import get_git_version as get_git_version
from signal import signal, SIGTERM, SIGKILL
# exit code to indicate scan completion
# make sure to update this in bwauthority_child.py as well
STOP_PCT_REACHED = 9
# path to git repos (.git)
PATH_TO_TORFLOW_REPO = '../../.git/'
PATH_TO_TORCTL_REPO = '../../TorCtl/.git/'
def main(argv):
    """Supervise bwauthority_child.py, re-running it one slice at a time.

    Child exit codes: 0 means the slice finished (advance to the next),
    STOP_PCT_REACHED means the configured network percentage was scanned
    (restart from slice 0); negative codes mean killed by that signal.
    argv[1] is the config file path forwarded to the child.
    """
    (branch, head) = get_git_version(PATH_TO_TORFLOW_REPO)
    plog('INFO', 'TorFlow Version: %s' % branch+' '+head)
    (branch, head) = get_git_version(PATH_TO_TORCTL_REPO)
    plog('INFO', 'TorCtl Version: %s' % branch+' '+head)
    slice_num = 0
    while True:
        plog('INFO', 'Beginning time loop')
        # Keep the child handle global so sigterm_handler can kill it.
        global p
        p = Popen(["python", "bwauthority_child.py", argv[1], str(slice_num)])
        p.wait()
        if (p.returncode == 0):
            slice_num += 1
        elif (p.returncode == STOP_PCT_REACHED):
            plog('INFO', 'restarting from slice 0')
            slice_num = 0
        elif (abs(p.returncode) == SIGKILL):
            plog('WARN', 'Child process recieved SIGKILL, exiting')
            exit()
        elif (abs(p.returncode) == SIGTERM):
            #XXX
            # see: https://trac.torproject.org/projects/tor/ticket/3701
            # if uncaught exceptions are raised in user-written handlers, TorCtl
            # will kill the bwauthority_child process using os.kill() because sys.exit()
            # only exits the thread in which the exception is caught.
            # quote mikeperry: "we want this thing not do die. that is priority one"
            # therefore: we restart the child process and hope for the best :-)
            plog('WARN', 'Child process recieved SIGTERM')
            #exit()
        else:
            plog('WARN', 'Child process returned %s' % p.returncode)
def sigterm_handler(signum, frame):
    """SIGTERM handler: forward the kill to the child scanner, then exit."""
    # NOTE(review): `p` is only bound once main() has started a child; a
    # SIGTERM delivered before that would raise NameError here -- confirm.
    if p:
        p.kill()
    exit()
if __name__ == '__main__':
    # Install the SIGTERM forwarder before spawning any child.
    signal(SIGTERM, sigterm_handler)
    try:
        main(s_argv)
    except KeyboardInterrupt:
        p.kill()
        plog('INFO', "Ctrl + C was pressed. Exiting ... ")
    except Exception, e:
        # Python 2 except syntax; `e` is captured but its text is not
        # included in the log message.
        plog('ERROR', "An unexpected error occured.")
|
knehez/edx-platform | refs/heads/memooc | lms/djangoapps/verify_student/services.py | 27 | """
Implementation of "reverification" service to communicate with Reverification XBlock
"""
import logging
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.db import IntegrityError
from opaque_keys.edx.keys import CourseKey
from verify_student.models import VerificationCheckpoint, VerificationStatus, SkippedReverification
log = logging.getLogger(__name__)
class ReverificationService(object):
    """
    Reverification XBlock service
    """

    def get_status(self, user_id, course_id, related_assessment_location):
        """Get verification attempt status against a user for a given
        'checkpoint' and 'course_id'.

        Args:
            user_id(str): User Id string
            course_id(str): A string of course id
            related_assessment_location(str): Location of Reverification XBlock

        Returns:
            "skipped" if the user has skipped the re-verification or
            Verification Status string if the user has submitted photo
            verification attempt else None
        """
        course_key = CourseKey.from_string(course_id)
        # A skipped checkpoint takes precedence over any recorded attempt.
        has_skipped = SkippedReverification.check_user_skipped_reverification_exists(user_id, course_key)
        if has_skipped:
            return "skipped"
        try:
            checkpoint_status = VerificationStatus.objects.filter(
                user_id=user_id,
                checkpoint__course_id=course_key,
                checkpoint__checkpoint_location=related_assessment_location
            ).latest()
            return checkpoint_status.status
        except ObjectDoesNotExist:
            # No attempt has been recorded for this checkpoint yet.
            return None

    def start_verification(self, course_id, related_assessment_location):
        """Create re-verification link against a verification checkpoint.

        Args:
            course_id(str): A string of course id
            related_assessment_location(str): Location of Reverification XBlock

        Returns:
            Re-verification link
        """
        course_key = CourseKey.from_string(course_id)
        # Idempotent: ensure the checkpoint row exists before linking to it.
        VerificationCheckpoint.objects.get_or_create(
            course_id=course_key,
            checkpoint_location=related_assessment_location
        )
        re_verification_link = reverse(
            'verify_student_incourse_reverify',
            args=(
                unicode(course_key),
                unicode(related_assessment_location)
            )
        )
        return re_verification_link

    def skip_verification(self, user_id, course_id, related_assessment_location):
        """Add skipped verification attempt entry for a user against a given
        'checkpoint'.

        Args:
            user_id(str): User Id string
            course_id(str): A string of course_id
            related_assessment_location(str): Location of Reverification XBlock

        Returns:
            None
        """
        course_key = CourseKey.from_string(course_id)
        checkpoint = VerificationCheckpoint.objects.get(
            course_id=course_key,
            checkpoint_location=related_assessment_location
        )
        # user can skip a reverification attempt only if that user has not already
        # skipped an attempt
        try:
            SkippedReverification.add_skipped_reverification_attempt(checkpoint, user_id, course_key)
        except IntegrityError:
            # The DB unique constraint enforces at-most-one skip row per user.
            log.exception("Skipped attempt already exists for user %s: with course %s:", user_id, unicode(course_id))

    def get_attempts(self, user_id, course_id, related_assessment_location):
        """Get re-verification attempts against a user for a given 'checkpoint'
        and 'course_id'.

        Args:
            user_id(str): User Id string
            course_id(str): A string of course id
            related_assessment_location(str): Location of Reverification XBlock

        Returns:
            Number of re-verification attempts of a user
        """
        course_key = CourseKey.from_string(course_id)
        return VerificationStatus.get_user_attempts(user_id, course_key, related_assessment_location)
|
mathspace/django | refs/heads/master | tests/test_runner/__init__.py | 12133432 | |
keiserlab/e3fp-paper | refs/heads/master | e3fp_paper/sea_utils/__init__.py | 12133432 | |
brutalic/pynet_brutal | refs/heads/master | class8/DJANGOX/djproject/net_system/migrations/__init__.py | 12133432 | |
noironetworks/neutron | refs/heads/master | neutron/tests/unit/agent/ovsdb/native/__init__.py | 12133432 | |
noironetworks/neutron | refs/heads/master | neutron/conf/db/__init__.py | 12133432 | |
gangadhar-kadam/helpdesk-frappe | refs/heads/develop | frappe/patches/__init__.py | 12133432 | |
atmtools/typhon | refs/heads/master | typhon/tests/math/__init__.py | 12133432 | |
darjeeling/django | refs/heads/master | tests/gis_tests/distapp/models.py | 71 | from django.contrib.gis.db import models
from ..utils import gisfield_may_be_null
class NamedModel(models.Model):
    # Abstract base giving every test model a name and a readable str().
    name = models.CharField(max_length=30)

    class Meta:
        abstract = True

    def __str__(self):
        return self.name
class SouthTexasCity(NamedModel):
    "City model on projected coordinate system for South Texas."
    # SRID 32140: NAD83 / Texas South Central projection (meter units).
    point = models.PointField(srid=32140)
    radius = models.IntegerField(default=10000)
class SouthTexasCityFt(NamedModel):
    "Same City model as above, but U.S. survey feet are the units."
    # SRID 2278: NAD83 / Texas South Central in US survey feet.
    point = models.PointField(srid=2278)
class AustraliaCity(NamedModel):
    "City model for Australia, using WGS84."
    # No srid given: defaults to 4326 (WGS84 geographic coordinates).
    point = models.PointField()
    radius = models.IntegerField(default=10000)
class CensusZipcode(NamedModel):
    "Model for a few South Texas ZIP codes (in original Census NAD83)."
    # SRID 4269: NAD83 geographic coordinates.
    poly = models.PolygonField(srid=4269)
class SouthTexasZipcode(NamedModel):
    "Model for a few South Texas ZIP codes."
    # Nullability is backend-dependent; see ..utils.gisfield_may_be_null.
    poly = models.PolygonField(srid=32140, null=gisfield_may_be_null)
class Interstate(NamedModel):
    "Geodetic model for U.S. Interstates."
    # Default SRID 4326 (geodetic coordinate system).
    path = models.LineStringField()
class SouthTexasInterstate(NamedModel):
    "Projected model for South Texas Interstates."
    # SRID 32140: projected coordinates in meters.
    path = models.LineStringField(srid=32140)
|
seanfisk/buzzword-bingo-server | refs/heads/master | django/conf/locale/et/formats.py | 316 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Estonian (et) locale formats; commented-out names fall back to defaults.
DATE_FORMAT = 'j F Y'
TIME_FORMAT = 'G:i:s'
# DATETIME_FORMAT =
# YEAR_MONTH_FORMAT =
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd.m.Y'
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = u' ' # Non-breaking space
# NUMBER_GROUPING =
|
CS-SI/QGIS | refs/heads/master | python/plugins/processing/algs/qgis/Orthogonalize.py | 16 | # -*- coding: utf-8 -*-
"""
***************************************************************************
Orthogonalize.py
----------------
Date : December 2016
Copyright : (C) 2016 by Nyall Dawson
Email : nyall dot dawson at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Nyall Dawson'
__date__ = 'December 2016'
__copyright__ = '(C) 2016, Nyall Dawson'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.core import (QgsProcessing,
QgsProcessingException,
QgsProcessingParameterDefinition,
QgsProcessingParameterNumber)
from processing.algs.qgis.QgisAlgorithm import QgisFeatureBasedAlgorithm
class Orthogonalize(QgisFeatureBasedAlgorithm):
    """Processing algorithm that squares near-right angles of line/polygon
    geometries via QgsGeometry.orthogonalize()."""

    MAX_ITERATIONS = 'MAX_ITERATIONS'
    # NOTE(review): DISTANCE_THRESHOLD is declared but never registered as
    # a parameter below -- confirm whether it is vestigial.
    DISTANCE_THRESHOLD = 'DISTANCE_THRESHOLD'
    ANGLE_TOLERANCE = 'ANGLE_TOLERANCE'

    def tags(self):
        """Search keywords shown in the Processing toolbox."""
        return self.tr('rectangle,perpendicular,right,angles,square,quadrilateralise').split(',')

    def group(self):
        return self.tr('Vector geometry')

    def groupId(self):
        return 'vectorgeometry'

    def __init__(self):
        super().__init__()
        # Filled in by prepareAlgorithm() from the user's parameter values.
        self.max_iterations = None
        self.angle_tolerance = None

    def initParameters(self, config=None):
        """Register the angle tolerance and (advanced) iteration cap."""
        self.addParameter(QgsProcessingParameterNumber(self.ANGLE_TOLERANCE,
                                                       self.tr('Maximum angle tolerance (degrees)'),
                                                       type=QgsProcessingParameterNumber.Double,
                                                       minValue=0.0, maxValue=45.0, defaultValue=15.0))
        max_iterations = QgsProcessingParameterNumber(self.MAX_ITERATIONS,
                                                      self.tr('Maximum algorithm iterations'),
                                                      type=QgsProcessingParameterNumber.Integer,
                                                      minValue=1, maxValue=10000, defaultValue=1000)
        # The iteration cap is an advanced option, hidden by default.
        max_iterations.setFlags(max_iterations.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
        self.addParameter(max_iterations)

    def name(self):
        return 'orthogonalize'

    def displayName(self):
        return self.tr('Orthogonalize')

    def outputName(self):
        return self.tr('Orthogonalized')

    def inputLayerTypes(self):
        # Only polygons and lines have angles to square.
        return [QgsProcessing.TypeVectorPolygon, QgsProcessing.TypeVectorLine]

    def prepareAlgorithm(self, parameters, context, feedback):
        """Cache parameter values before per-feature processing starts."""
        self.max_iterations = self.parameterAsInt(parameters, self.MAX_ITERATIONS, context)
        self.angle_tolerance = self.parameterAsDouble(parameters, self.ANGLE_TOLERANCE, context)
        return True

    def processFeature(self, feature, context, feedback):
        """Orthogonalize one feature; null geometries pass through untouched."""
        input_geometry = feature.geometry()
        if input_geometry:
            # 1.0e-8 is the tolerance passed to orthogonalize(); a null
            # result means the algorithm failed to converge.
            output_geometry = input_geometry.orthogonalize(1.0e-8, self.max_iterations, self.angle_tolerance)
            if not output_geometry:
                raise QgsProcessingException(
                    self.tr('Error orthogonalizing geometry'))
            feature.setGeometry(output_geometry)
        return [feature]
|
OpenLD/enigma2-wetek | refs/heads/master | lib/python/Plugins/Extensions/StartKodi/plugin.py | 1 | ### Start Kodi
### check for space and whether installed
### main work done in enigma2.sh, here we do just a touch
### TODO: installation error checking is missing, network state...
from Screens.Screen import Screen
from Components.ActionMap import ActionMap
from Components.Label import Label
from Plugins.Plugin import PluginDescriptor
from Screens.MessageBox import MessageBox
import os
#from Components.Ipkg import IpkgComponent
#from Screens.Ipkg import Ipkg
from enigma import quitMainloop
from Plugins.Extensions.StartKodi.installsomething import InstallSomething
class StartKodi2(Screen):
    """Launcher screen for Kodi on WeTek: shows free-space info, then starts
    or installs Kodi.

    The actual hand-over to Kodi happens in enigma2.sh, which looks for the
    /etc/.kodistart marker file; this screen only creates that marker and
    restarts enigma2 (quitMainloop(3)).
    """

    kodi_name = "kodi-amlogic"  # opkg package name installed on demand
    kodineeds = 200 # TODO: check real needs, more likely to be ~ 300MB
    caninstall = False  # refreshed by getFreeNand(): enough free space to install?
    isinstalled = False  # refreshed by isKodiInstalled()
    skin = """
        <screen position="center,center" size="500,200" title="Kodi Media Center">
            <widget name="text" position="30,30" size="360,25" font="Regular;25" />
            <widget name="sd_label" position="30,100" size="310,25" font="Regular;20" />
            <widget name="freespace_label" position="30,125" size="310,25" font="Regular;20" />
            <widget name="installed_label" position="30,150" size="310,25" font="Regular;20" />
            <widget name="sd" position="340,100" size="150,25" font="Regular;20" />
            <widget name="freespace" position="340,125" size="150,25" font="Regular;20" />
            <widget name="installed" position="340,150" size="150,25" font="Regular;20" />
        </screen>"""

    def __init__(self, session, args = 0):
        self.session = session
        Screen.__init__(self, session)
        # Probe disk space and installation state up front so the labels can
        # show them immediately (these calls also set caninstall/isinstalled).
        freembsd = str(self.getFreeSD())
        freemb = str(self.getFreeNand())
        isInstalled = str(self.isKodiInstalled())
        self["text"] = Label(_("Please press OK to start Kodi..."))
        self["sd_label"] = Label(_("Kodi/extra partition free space:"))
        self["freespace_label"] = Label(_("System partition free space:"))
        self["installed_label"] = Label(_("Kodi installed:"))
        self["sd"] = Label(freembsd + " MB")
        self["freespace"] = Label(freemb + " MB")
        self["installed"] = Label(isInstalled)
        self["actions"] = ActionMap(["OkCancelActions"],
        {
            "ok": self.ok,
            "cancel": self.close,
        })
        self.onShown.append(self.onFirstShown) ### !!! A must to avoid modal crap

    def onFirstShown(self):
        # Runs exactly once per screen instance: decide between "press OK",
        # "not enough space", or offering the install dialog.
        self.onShown.remove(self.onFirstShown) ### avoid perpetual installs
        if (self.isinstalled):
            self["text"] = Label(_("\n Please press OK to start Kodi..."))
        elif (self.caninstall is False):
            self["text"] = Label(_("\n WARNING: \n There is not enough space to install Kodi!"))
        else:
            self.session.openWithCallback(self.doInstallCallback, MessageBox, _("\n Kodi not present. Proceed with install?"))
        ### wo callback message is shown after install
        # self.session.open(MessageBox,_("\n Kodi not present, installing, please wait..."), MessageBox.TYPE_INFO, timeout = 5)
        # self["text"] = Label(_("\n Kodi not present, installing, please wait..."))
        # os.system("opkg install kodi-amlogic")
        # os.system("touch /etc/.kodistart")
        # Try more civilized download
        # self.KodiInstallation = InstallSomething(self.session, self.kodi_name)
        # self.KodiInstallation.__install__()
        # self.isinstalled = True

    def doInstallCallback(self, result):
        # MessageBox callback; result is truthy when the user confirmed.
        if result:
            self.KodiInstallation = InstallSomething(self.session, [self.kodi_name])
            self.KodiInstallation.__install__()
            self.isinstalled = True # actually very bad, we did not check for errors
            os.system("touch /etc/.kodistart") # but enigma2.sh checks for /usr/bin/xbmc
        ### TODO: done touch(es) should go here

    def ok(self):
        # OK starts Kodi when installed (marker file + enigma2 restart),
        # otherwise just closes the screen.
        if (self.isinstalled):
            # self.[text] = Label(_("Starting Kodi..."))
            # self["text"].hide()
            # self["text"].show()
            # StartKodi2.already_shown = False
            # StartKodi2.hide(self)
            # StartKodi2.show(self)
            # StartKodi2.update(self)
            os.system("touch /etc/.kodistart")
            quitMainloop(3)
        else:
            self.close()

    ### TODO: check portability (busybox vs coreutils)
    def getFreeNand(self):
        # Return free space in MB on the root partition (parsed from `df`),
        # and refresh self.caninstall against the kodineeds threshold.
        os.system('sync ; sync ; sync' )
        sizeread = os.popen("df | grep %s | tr -s ' '" % 'root')
        c = sizeread.read().strip().split(" ")
        sizeread.close()
        free = int(c[3])/1024
        if (free > self.kodineeds):
            self.caninstall = True
        else:
            self.caninstall = False
        return free
        #hopefully return free MBs in NAND/uSD
        #self["lab_flash"].setText("%sB out of %sB" % (c[3], c[1]))
        #self["Used"].setText("Used: %s" % c[2])
        #self["Available"].setText("Available: %s" % c[3])
        #self["Use in %"].setText("Use: %s" % c[4])
        #self["Partition"].setText("Partition: %s" % c[0])

    ### TODO: check if partition exists check portability (busybox vs coreutils)
    def getFreeSD(self):
        # Return free space in MB on the uSDextra partition, or the string
        # "Not available" when that mount point does not exist.
        # os.system('sync ; sync ; sync' )
        sizeread = os.popen("df | grep %s | tr -s ' '" % 'uSDextra')
        c = sizeread.read().strip().split(" ")
        sizeread.close()
        if os.path.exists("/media/uSDextra"):
            free = int(c[3])/1024
        else:
            free = "Not available"
        return free

    ### not very clever...
    def isKodiInstalled(self):
        # Presence of the kodi binary is taken as "installed"; the answer is
        # also cached on self.isinstalled for the other methods.
        if os.path.exists("/usr/lib/kodi/kodi.bin"):
            self.isinstalled = True
            return True
        else:
            self.isinstalled = False
            return False
### Not used at the moment
class SysMessage(Screen):
    """Minimal modal screen displaying a system message (currently unused)."""

    skin = """
        <screen position="150,200" size="450,200" title="System Message" >
            <widget source="text" position="0,0" size="450,200" font="Regular;20" halign="center" valign="center" render="Label" />
            <ePixmap pixmap="icons/input_error.png" position="5,5" size="53,53" alphatest="on" />
        </screen>"""

    def __init__(self, session, message):
        Screen.__init__(self, session)
        # Imported lazily so the module loads even before Components is ready.
        from Components.Sources.StaticText import StaticText
        self["text"] = StaticText(message)
        self["actions"] = ActionMap(["OkCancelActions"], {"cancel": self.ok})

    def ok(self):
        # Any dismissal simply closes the screen.
        self.close()
### MENU service stuff
def main(session, **kwargs):
    """Plugin entry point: open the Kodi start screen in the given session."""
    session.open(StartKodi2)
def menu(menuid, **kwargs):
    """Main-menu hook: contribute a Kodi entry only to the main menu."""
    if menuid != "mainmenu":
        return []
    # (title, callback, entry id, weight) as expected by WHERE_MENU plugins.
    return [(_("Kodi Media Center"), main, "start_kodi", 44)]
def Plugins(**kwargs):
    """Register the plugin: one plugin-menu entry and one main-menu entry."""
    plugin_menu_entry = PluginDescriptor(name=_("Kodi Media Center"),
                                         description=_("Kodi Media Center for WeTek Play"),
                                         where=PluginDescriptor.WHERE_PLUGINMENU,
                                         icon="kodi.png", needsRestart=False, fnc=main)
    main_menu_entry = PluginDescriptor(name=_("Kodi Media Center"),
                                       description=_("Play back media files"),
                                       where=PluginDescriptor.WHERE_MENU,
                                       needsRestart=False, fnc=menu)
    return [plugin_menu_entry, main_menu_entry]
# PluginDescriptor(name = _("StartKodi"), description = _("Play back media files"), where = PluginDescriptor.WHERE_EXTENSIONSMENU, needsRestart = False, fnc = menu)
|
devs1991/test_edx_docmode | refs/heads/master | venv/lib/python2.7/site-packages/babel/localtime/_unix.py | 160 | from __future__ import with_statement
import os
import re
import sys
import pytz
import subprocess
_systemconfig_tz = re.compile(r'^Time Zone: (.*)$(?m)')
def _tz_from_env(tzenv):
    """Resolve a ``TZ`` environment value to a tzinfo object.

    *tzenv* may be an (optionally ':'-prefixed) path to a tzfile, or a
    zoneinfo zone name such as ``Europe/Paris``.

    Raises ``pytz.UnknownTimeZoneError`` for values pytz cannot resolve.
    """
    # startswith() also handles an empty string safely (tzenv[0] would raise).
    if tzenv.startswith(':'):
        tzenv = tzenv[1:]

    # TZ specifies a file
    if os.path.exists(tzenv):
        with open(tzenv, 'rb') as tzfile:
            return pytz.tzfile.build_tzinfo('local', tzfile)

    # TZ specifies a zoneinfo zone.
    try:
        tz = pytz.timezone(tzenv)
        # That worked, so we return this:
        return tz
    except pytz.UnknownTimeZoneError:
        # Bug fix: the original never interpolated the offending value, so
        # the error message literally contained "%s".
        raise pytz.UnknownTimeZoneError(
            "tzlocal() does not support non-zoneinfo timezones like %s. \n"
            "Please use a timezone in the form of Continent/City" % tzenv)
def _get_localzone(_root='/'):
    """Tries to find the local timezone configuration.
    This method prefers finding the timezone name and passing that to pytz,
    over passing in the localtime file, as in the latter case the zoneinfo
    name is unknown.
    The parameter _root makes the function look for files like /etc/localtime
    beneath the _root directory. This is primarily used by the tests.
    In normal usage you call the function without parameters.
    """
    # 1) An explicit $TZ setting wins over all system configuration.
    tzenv = os.environ.get('TZ')
    if tzenv:
        return _tz_from_env(tzenv)

    # 2) Follow the /etc/localtime symlink into the zoneinfo database.
    # This is actually a pretty reliable way to test for the local time
    # zone on operating systems like OS X. On OS X especially this is the
    # only one that actually works.
    try:
        link_dst = os.readlink('/etc/localtime')
    except OSError:
        pass
    else:
        pos = link_dst.find('/zoneinfo/')
        if pos >= 0:
            zone_name = link_dst[pos + 10:]
            try:
                return pytz.timezone(zone_name)
            except pytz.UnknownTimeZoneError:
                pass

    # 3) OS X fallback: ask systemsetup for the configured time zone name.
    # If we are on OS X now we are pretty sure that the rest of the
    # code will fail and just fall through until it hits the reading
    # of /etc/localtime and using it without name. At this point we
    # can invoke systemconfig which internally invokes ICU. ICU itself
    # does the same thing we do (readlink + compare file contents) but
    # since it knows where the zone files are that should be a bit
    # better than reimplementing the logic here.
    if sys.platform == 'darwin':
        c = subprocess.Popen(['systemsetup', '-gettimezone'],
                             stdout=subprocess.PIPE)
        sys_result = c.communicate()[0]
        c.wait()
        tz_match = _systemconfig_tz.search(sys_result)
        if tz_match is not None:
            zone_name = tz_match.group(1)
            try:
                return pytz.timezone(zone_name)
            except pytz.UnknownTimeZoneError:
                pass

    # 4) Now look for distribution specific configuration files
    # that contain the timezone name.
    tzpath = os.path.join(_root, 'etc/timezone')
    if os.path.exists(tzpath):
        with open(tzpath, 'rb') as tzfile:
            data = tzfile.read()

            # Issue #3 in tzlocal was that /etc/timezone was a zoneinfo file.
            # That's a misconfiguration, but we need to handle it gracefully:
            # NOTE(review): on Python 3 `data` is bytes, so this str compare
            # would always be True -- presumably this module targets Python 2
            # (see the __future__ import at the top); confirm before porting.
            if data[:5] != 'TZif2':
                etctz = data.strip().decode()
                # Get rid of host definitions and comments:
                if ' ' in etctz:
                    etctz, dummy = etctz.split(' ', 1)
                if '#' in etctz:
                    etctz, dummy = etctz.split('#', 1)
                return pytz.timezone(etctz.replace(' ', '_'))

    # 5) CentOS has a ZONE setting in /etc/sysconfig/clock,
    # OpenSUSE has a TIMEZONE setting in /etc/sysconfig/clock and
    # Gentoo has a TIMEZONE setting in /etc/conf.d/clock
    # We look through these files for a timezone:
    zone_re = re.compile('\s*ZONE\s*=\s*\"')
    timezone_re = re.compile('\s*TIMEZONE\s*=\s*\"')
    end_re = re.compile('\"')

    for filename in ('etc/sysconfig/clock', 'etc/conf.d/clock'):
        tzpath = os.path.join(_root, filename)
        if not os.path.exists(tzpath):
            continue
        with open(tzpath, 'rt') as tzfile:
            data = tzfile.readlines()

        for line in data:
            # Look for the ZONE= setting.
            match = zone_re.match(line)
            if match is None:
                # No ZONE= setting. Look for the TIMEZONE= setting.
                match = timezone_re.match(line)
            if match is not None:
                # Some setting existed
                line = line[match.end():]
                etctz = line[:end_re.search(line).start()]

                # We found a timezone
                return pytz.timezone(etctz.replace(' ', '_'))

    # 6) No explicit setting existed. Use localtime
    for filename in ('etc/localtime', 'usr/local/etc/localtime'):
        tzpath = os.path.join(_root, filename)

        if not os.path.exists(tzpath):
            continue

        with open(tzpath, 'rb') as tzfile:
            return pytz.tzfile.build_tzinfo('local', tzfile)

    raise pytz.UnknownTimeZoneError('Can not find any timezone configuration')
|
bgris/ODL_bgris | refs/heads/master | lib/python3.5/site-packages/pip/vcs/subversion.py | 343 | from __future__ import absolute_import
import logging
import os
import re
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip.index import Link
from pip.utils import rmtree, display_path
from pip.utils.logging import indent_log
from pip.vcs import vcs, VersionControl
# Patterns for scraping URL / revision information out of the several formats
# that `svn info` output and .svn/entries metadata can take.
_svn_xml_url_re = re.compile('url="([^"]+)"')
_svn_rev_re = re.compile('committed-rev="(\d+)"')
_svn_url_re = re.compile(r'URL: (.+)')
_svn_revision_re = re.compile(r'Revision: (.+)')
_svn_info_xml_rev_re = re.compile(r'\s*revision="(\d+)"')
_svn_info_xml_url_re = re.compile(r'<url>(.*)</url>')
logger = logging.getLogger(__name__)
class Subversion(VersionControl):
    """pip VCS backend for Subversion checkouts (svn, svn+http, svn+ssh, ...)."""

    name = 'svn'
    dirname = '.svn'
    repo_name = 'checkout'
    schemes = ('svn', 'svn+ssh', 'svn+http', 'svn+https', 'svn+svn')

    def get_info(self, location):
        """Returns (url, revision), where both are strings"""
        assert not location.rstrip('/').endswith(self.dirname), \
            'Bad directory: %s' % location
        # LANG=C forces untranslated output so the regexes below match.
        output = self.run_command(
            ['info', location],
            show_stdout=False,
            extra_environ={'LANG': 'C'},
        )
        match = _svn_url_re.search(output)
        if not match:
            logger.warning(
                'Cannot determine URL of svn checkout %s',
                display_path(location),
            )
            logger.debug('Output that cannot be parsed: \n%s', output)
            return None, None
        url = match.group(1).strip()
        match = _svn_revision_re.search(output)
        if not match:
            logger.warning(
                'Cannot determine revision of svn checkout %s',
                display_path(location),
            )
            logger.debug('Output that cannot be parsed: \n%s', output)
            return url, None
        return url, match.group(1)

    def export(self, location):
        """Export the svn repository at the url to the destination location"""
        url, rev = self.get_url_rev()
        rev_options = get_rev_options(url, rev)
        # Credentials travel via rev_options flags, not inside the URL.
        url = self.remove_auth_from_url(url)
        logger.info('Exporting svn repository %s to %s', url, location)
        with indent_log():
            if os.path.exists(location):
                # Subversion doesn't like to check out over an existing
                # directory --force fixes this, but was only added in svn 1.5
                rmtree(location)
            self.run_command(
                ['export'] + rev_options + [url, location],
                show_stdout=False)

    def switch(self, dest, url, rev_options):
        """Switch the checkout in *dest* to point at a new repository URL."""
        self.run_command(['switch'] + rev_options + [url, dest])

    def update(self, dest, rev_options):
        """Update the checkout in *dest* to the requested revision."""
        self.run_command(['update'] + rev_options + [dest])

    def obtain(self, dest):
        """Check the repository out into *dest* (after destination vetting)."""
        url, rev = self.get_url_rev()
        rev_options = get_rev_options(url, rev)
        url = self.remove_auth_from_url(url)
        if rev:
            rev_display = ' (to revision %s)' % rev
        else:
            rev_display = ''
        if self.check_destination(dest, url, rev_options, rev_display):
            logger.info(
                'Checking out %s%s to %s',
                url,
                rev_display,
                display_path(dest),
            )
            self.run_command(['checkout', '-q'] + rev_options + [url, dest])

    def get_location(self, dist, dependency_links):
        """Return the dependency link whose egg fragment matches *dist*, if any."""
        for url in dependency_links:
            egg_fragment = Link(url).egg_fragment
            if not egg_fragment:
                continue
            if '-' in egg_fragment:
                # FIXME: will this work when a package has - in the name?
                key = '-'.join(egg_fragment.split('-')[:-1]).lower()
            else:
                key = egg_fragment
            if key == dist.key:
                return url.split('#', 1)[0]
        return None

    def get_revision(self, location):
        """
        Return the maximum revision for all files under a given location
        """
        # Note: taken from setuptools.command.egg_info
        revision = 0
        for base, dirs, files in os.walk(location):
            if self.dirname not in dirs:
                dirs[:] = []
                continue  # no sense walking uncontrolled subdirs
            dirs.remove(self.dirname)
            entries_fn = os.path.join(base, self.dirname, 'entries')
            if not os.path.exists(entries_fn):
                # FIXME: should we warn?
                continue
            dirurl, localrev = self._get_svn_url_rev(base)
            if base == location:
                base_url = dirurl + '/'  # save the root url
            elif not dirurl or not dirurl.startswith(base_url):
                # Unrelated working copy nested under ours; skip its subtree.
                dirs[:] = []
                continue  # not part of the same svn tree, skip it
            revision = max(revision, localrev)
        return revision

    def get_url_rev(self):
        """Return (url, rev), re-adding the svn+ prefix stripped off svn+ssh URLs."""
        # hotfix the URL scheme after removing svn+ from svn+ssh:// readd it
        url, rev = super(Subversion, self).get_url_rev()
        if url.startswith('ssh://'):
            url = 'svn+' + url
        return url, rev

    def get_url(self, location):
        """Return the repository URL of the checkout containing *location*."""
        # In cases where the source is in a subdirectory, not alongside
        # setup.py we have to look up in the location until we find a real
        # setup.py
        orig_location = location
        while not os.path.exists(os.path.join(location, 'setup.py')):
            last_location = location
            location = os.path.dirname(location)
            if location == last_location:
                # We've traversed up to the root of the filesystem without
                # finding setup.py
                logger.warning(
                    "Could not find setup.py for directory %s (tried all "
                    "parent directories)",
                    orig_location,
                )
                return None
        return self._get_svn_url_rev(location)[0]

    def _get_svn_url_rev(self, location):
        """Parse (url, max revision) from .svn metadata or `svn info --xml`."""
        from pip.exceptions import InstallationError
        entries_path = os.path.join(location, self.dirname, 'entries')
        if os.path.exists(entries_path):
            with open(entries_path) as f:
                data = f.read()
        else:  # subversion >= 1.7 does not have the 'entries' file
            data = ''
        # Entries format versions 8-10 are plain text, \x0c-separated records.
        if (data.startswith('8') or
                data.startswith('9') or
                data.startswith('10')):
            data = list(map(str.splitlines, data.split('\n\x0c\n')))
            del data[0][0]  # get rid of the '8'
            url = data[0][3]
            revs = [int(d[9]) for d in data if len(d) > 9 and d[9]] + [0]
        elif data.startswith('<?xml'):
            match = _svn_xml_url_re.search(data)
            if not match:
                raise ValueError('Badly formatted data: %r' % data)
            url = match.group(1)  # get repository URL
            revs = [int(m.group(1)) for m in _svn_rev_re.finditer(data)] + [0]
        else:
            try:
                # subversion >= 1.7
                xml = self.run_command(
                    ['info', '--xml', location],
                    show_stdout=False,
                )
                url = _svn_info_xml_url_re.search(xml).group(1)
                revs = [
                    int(m.group(1)) for m in _svn_info_xml_rev_re.finditer(xml)
                ]
            except InstallationError:
                url, revs = None, []
        if revs:
            rev = max(revs)
        else:
            rev = 0
        return url, rev

    def get_src_requirement(self, dist, location):
        """Return a 'svn+URL@rev#egg=name' requirement string for the checkout."""
        repo = self.get_url(location)
        if repo is None:
            return None
        # FIXME: why not project name?
        egg_project_name = dist.egg_name().split('-', 1)[0]
        rev = self.get_revision(location)
        return 'svn+%s@%s#egg=%s' % (repo, rev, egg_project_name)

    def check_version(self, dest, rev_options):
        """Always assume the versions don't match"""
        return False

    @staticmethod
    def remove_auth_from_url(url):
        # Return a copy of url with 'username:password@' removed.
        # username/pass params are passed to subversion through flags
        # and are not recognized in the url.

        # parsed url
        purl = urllib_parse.urlsplit(url)
        stripped_netloc = \
            purl.netloc.split('@')[-1]

        # stripped url
        url_pieces = (
            purl.scheme, stripped_netloc, purl.path, purl.query, purl.fragment
        )
        surl = urllib_parse.urlunsplit(url_pieces)
        return surl
def get_rev_options(url, rev):
    """Translate a repository URL and revision into svn command-line options.

    Embedded ``user:password@`` credentials in *url* are emitted as
    ``--username`` / ``--password`` flags; *rev*, when given, becomes ``-r``.
    """
    options = ['-r', rev] if rev else []

    parsed = urllib_parse.urlsplit(url)
    if hasattr(parsed, 'username'):
        # >= Python-2.5
        username, password = parsed.username, parsed.password
    else:
        netloc = parsed[1]
        if '@' not in netloc:
            username, password = None, None
        else:
            auth = netloc.split('@')[0]
            if ':' in auth:
                username, password = auth.split(':', 1)
            else:
                username, password = auth, None

    if username:
        options += ['--username', username]
    if password:
        options += ['--password', password]
    return options
vcs.register(Subversion)
|
anish/buildbot | refs/heads/master | master/buildbot/test/integration/interop/test_integration_secrets.py | 1 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import os
from parameterized import parameterized
from twisted.internet import defer
from buildbot.process.properties import Interpolate
from buildbot.reporters.http import HttpStatusPush
from buildbot.test.fake.secrets import FakeSecretStorage
from buildbot.test.util.integration import RunMasterBase
class FakeSecretReporter(HttpStatusPush):
    """Reporter stub that verifies the secret-interpolated HTTP credentials."""

    def send(self, build):
        # The master config interpolates 'httppasswd' from the secret store;
        # if this assert fires, secret substitution in service config broke.
        assert self.auth == ('user', 'myhttppasswd')
        # Flag checked by the tests to prove the reporter actually ran.
        self.reported = True
class SecretsConfig(RunMasterBase):
    """Integration tests: secrets must reach build commands but never the DB."""

    @parameterized.expand([
        ('with_interpolation', True),
        ('plain_command', False),
    ])
    @defer.inlineCallbacks
    def test_secret(self, name, use_interpolation):
        # Force a build whose command uses the secret 'foo', then verify the
        # secret value is obfuscated ("<foo>") everywhere the db keeps it.
        c = masterConfig(use_interpolation)
        yield self.setupConfig(c)
        build = yield self.doForceBuild(wantSteps=True, wantLogs=True)
        self.assertEqual(build['buildid'], 1)
        # check the command line
        res = yield self.checkBuildStepLogExist(build, "echo <foo>")
        # also check the secrets are replaced in argv
        yield self.checkBuildStepLogExist(build, "argv:.*echo.*<foo>", regex=True)
        # also check that the correct value goes to the command
        if os.name == "posix" and use_interpolation:
            res &= yield self.checkBuildStepLogExist(build, "The password was there")
        self.assertTrue(res)
        # at this point, build contains all the log and steps info that is in the db
        # we check that our secret is not in there!
        self.assertNotIn("bar", repr(build))
        self.assertTrue(c['services'][0].reported)

    @parameterized.expand([
        ('with_interpolation', True),
        ('plain_command', False),
    ])
    @defer.inlineCallbacks
    def test_secretReconfig(self, name, use_interpolation):
        # Reconfigure the master with a replacement secret store; the new
        # secret value must be used and must not leak into build data either.
        c = masterConfig(use_interpolation)
        yield self.setupConfig(c)
        c['secretsProviders'] = [FakeSecretStorage(
            secretdict={"foo": "different_value", "something": "more"})]
        yield self.master.reconfig()
        build = yield self.doForceBuild(wantSteps=True, wantLogs=True)
        self.assertEqual(build['buildid'], 1)
        res = yield self.checkBuildStepLogExist(build, "echo <foo>")
        self.assertTrue(res)
        # at this point, build contains all the log and steps info that is in the db
        # we check that our secret is not in there!
        self.assertNotIn("different_value", repr(build))
class SecretsConfigPB(SecretsConfig):
    # Re-run the same secrets scenarios over the PB worker protocol.
    proto = "pb"
# master configuration
def masterConfig(use_interpolation):
    """Build the master configuration dict used by the secrets tests.

    When *use_interpolation* is true the build command references the
    secret via Interpolate('%(secret:foo)s'); otherwise it passes
    util.Secret('foo') directly in the argv list.
    """
    from buildbot.config import BuilderConfig
    from buildbot.process.factory import BuildFactory
    from buildbot.plugins import schedulers, steps, util

    if use_interpolation:
        if os.name == "posix":
            # on posix we can also check whether the password was passed to the command
            command = Interpolate('echo %(secret:foo)s | sed "s/bar/The password was there/"')
        else:
            command = Interpolate('echo %(secret:foo)s')
    else:
        command = ['echo', util.Secret('foo')]

    factory = BuildFactory()
    factory.addStep(steps.ShellCommand(command=command))

    return {
        'services': [FakeSecretReporter('http://example.com/hook',
                                        auth=('user', Interpolate('%(secret:httppasswd)s')))],
        'schedulers': [
            schedulers.ForceScheduler(
                name="force",
                builderNames=["testy"])],
        'secretsProviders': [FakeSecretStorage(
            secretdict={"foo": "bar", "something": "more", 'httppasswd': 'myhttppasswd'})],
        'builders': [
            BuilderConfig(name="testy",
                          workernames=["local1"],
                          factory=factory)],
    }
|
BaxterStockman/ansible-modules-core | refs/heads/devel | windows/win_get_url.py | 4 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Paul Durivage <paul.durivage@rackspace.com>, and others
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
DOCUMENTATION = '''
---
module: win_get_url
version_added: "1.7"
short_description: Fetches a file from a given URL
description:
- Fetches a file from a URL and saves to locally
author:
- "Paul Durivage (@angstwad)"
- "Takeshi Kuramochi (tksarah)"
options:
url:
description:
- The full URL of a file to download
required: true
default: null
dest:
description:
- The absolute path of the location to save the file at the URL. Be sure
to include a filename and extension as appropriate.
required: true
default: null
force:
description:
- If C(yes), will always download the file. If C(no), will only
download the file if it does not exist or the remote file has been
modified more recently than the local file. This works by sending
an http HEAD request to retrieve last modified time of the requested
resource, so for this to work, the remote web server must support
HEAD requests.
version_added: "2.0"
required: false
choices: [ "yes", "no" ]
default: yes
username:
description:
- Basic authentication username
required: false
default: null
password:
description:
- Basic authentication password
required: false
default: null
skip_certificate_validation:
description:
- Skip SSL certificate validation if true
required: false
default: false
proxy_url:
description:
- The full URL of the proxy server to download through.
version_added: "2.0"
required: false
proxy_username:
description:
- Proxy authentication username
version_added: "2.0"
required: false
proxy_password:
description:
- Proxy authentication password
version_added: "2.0"
required: false
'''
# Usage examples shown by ansible-doc.  (Fix: "quoted rather instead of"
# was garbled English; now reads "quoted rather than escaping".)
EXAMPLES = r'''
# Downloading a JPEG and saving it to a file with the ansible command.
# Note the "dest" is quoted rather than escaping the backslashes
$ ansible -i hosts -c winrm -m win_get_url -a "url=http://www.example.com/earthrise.jpg dest='C:\\Users\\Administrator\\earthrise.jpg'" all
# Playbook example
- name: Download earthrise.jpg to 'C:\\Users\\RandomUser\\earthrise.jpg'
  win_get_url:
    url: http://www.example.com/earthrise.jpg
    dest: C:\Users\RandomUser\earthrise.jpg
- name: Download earthrise.jpg to 'C:\Users\RandomUser\earthrise.jpg' only if modified
  win_get_url:
    url: http://www.example.com/earthrise.jpg
    dest: C:\Users\RandomUser\earthrise.jpg
    force: no
- name: Download earthrise.jpg to 'C:\Users\RandomUser\earthrise.jpg' through a proxy server.
  win_get_url:
    url: http://www.example.com/earthrise.jpg
    dest: C:\Users\RandomUser\earthrise.jpg
    proxy_url: http://10.0.0.1:8080
    proxy_username: username
    proxy_password: password
'''
RETURN = '''
url:
description: requested url
returned: always
type: string
sample: 'http://www.example.com/earthrise.jpg'
dest:
description: destination file/path
returned: always
type: string
sample: 'C:\\Users\\RandomUser\\earthrise.jpg'
'''
|
NINAnor/QGIS | refs/heads/master | python/plugins/processing/algs/qgis/ui/__init__.py | 12133432 | |
PiperProject/piper | refs/heads/master | src/core/__init__.py | 12133432 | |
jboss-dockerfiles/dogen | refs/heads/develop | dogen/schema/__init__.py | 12133432 | |
AlperSaltabas/OR_Tools_Google_API | refs/heads/master | documentation/tutorials/python/chap2/cp_is_fun4.py | 37 | # Copyright 2010-2011 Google
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This problem has 72 different solutions in base 10.
Use of SolutionCollectors.
Use of Solve().
Use of gflags to choose the base.
Change the time limit of the solver.
"""
import gflags, sys
from constraint_solver import pywrapcp
from os import abort
# Command-line flags; parsed in the __main__ guard at the bottom of the file.
FLAGS = gflags.FLAGS
gflags.DEFINE_integer('base', 10, "Base used to solve the problem.")
gflags.DEFINE_bool('print_all_solutions', False, "Print all solutions?")
gflags.DEFINE_integer('time_limit', 10000, "Time limit in milliseconds")
def CPIsFun():
    """Solve the CP + IS + FUN = TRUE cryptarithm in base FLAGS.base.

    Collects every solution via an AllSolutionCollector (within the time
    limit), optionally prints them, and writes a search profile to
    profile.txt.
    """
    # Use some profiling and change the default parameters of the solver
    solver_params = pywrapcp.SolverParameters()
    # Change the profile level
    solver_params.profile_level = pywrapcp.SolverParameters.NORMAL_PROFILING
    # Constraint programming engine
    solver = pywrapcp.Solver('CP is fun!', solver_params);
    kBase = gflags.FLAGS.base
    # Decision variables: leading letters (C, I, F, T) must be non-zero.
    digits = range(0, kBase)
    digits_without_zero = digits[1:]
    c = solver.IntVar(digits_without_zero, 'C');
    p = solver.IntVar(digits, 'P');
    i = solver.IntVar(digits_without_zero, 'I');
    s = solver.IntVar(digits, 'S');
    f = solver.IntVar(digits_without_zero, 'F');
    u = solver.IntVar(digits, 'U');
    n = solver.IntVar(digits, 'N');
    t = solver.IntVar(digits_without_zero, 'T');
    r = solver.IntVar(digits, 'R');
    e = solver.IntVar(digits, 'E');
    # We need to group variables in a list to be able to use
    # the global constraint AllDifferent
    letters = [c, p, i, s, f, u, n, t, r, e]
    # Check if we have enough digits
    assert kBase >= len(letters)
    # Constraints
    solver.Add(solver.AllDifferent(letters))
    # CP + IS + FUN = TRUE, with each word read as a base-kBase numeral.
    term1 = solver.Sum([kBase*c, p])
    term2 = solver.Sum([kBase*i, s])
    term3 = solver.Sum([kBase*kBase*f, kBase*u, n])
    sum_terms = solver.Sum([term1, term2, term3])
    sum_value = solver.Sum([kBase*kBase*kBase*t, kBase*kBase*r, kBase*u, e])
    solver.Add(sum_terms == sum_value)
    all_solutions = solver.AllSolutionCollector()
    # Add the interesting variables to the SolutionCollector
    all_solutions.Add(letters)
    db = solver.Phase(letters, solver.INT_VAR_DEFAULT,
                      solver.INT_VALUE_DEFAULT)
    # Add some time limit
    time_limit = solver.TimeLimit(gflags.FLAGS.time_limit);
    solver.Solve(db, all_solutions, time_limit)
    # Retrieve the solutions
    numberSolutions = all_solutions.SolutionCount()
    print "Number of solutions: ", numberSolutions
    if (gflags.FLAGS.print_all_solutions):
        for index in range(numberSolutions):
            print "C=", all_solutions.Value(index, c), " P=", all_solutions.Value(index, p), " I=", \
                all_solutions.Value(index, i), " S=", all_solutions.Value(index, s), " F=", all_solutions.Value(index, f), \
                " U=", all_solutions.Value(index, u), " N=", all_solutions.Value(index, n), " T=", all_solutions.Value(index, t), \
                " R=", all_solutions.Value(index, r), " E=", all_solutions.Value(index, e)
    # Save profile in file
    solver.ExportProfilingOverview("profile.txt")
    return
# Parse the command-line flags (Python 2 gflags idiom), then solve.
if __name__ == '__main__':
    try:
        FLAGS(sys.argv) # parse flags
    except gflags.FlagsError, e:
        print '%s\\nUsage: %s ARGS\\n%s' % (e, sys.argv[0], FLAGS)
        sys.exit(1)
    CPIsFun()
|
marcoantoniooliveira/labweb | refs/heads/master | tests/dynamic_fixtures.py | 57 | from django_dynamic_fixture.ddf import DataFixture
from django_dynamic_fixture.fixture_algorithms.sequential_fixture import \
SequentialDataFixture
class OscarDynamicDataFixtureClass(SequentialDataFixture):
    """Data fixture aware of Oscar's custom ``PhoneNumberField``.

    django-dynamic-fixture resolves test values by field class name, so this
    hook supplies a fixed, valid phone number whenever a ``PhoneNumberField``
    is encountered while generating fixtures.
    """

    def phonenumberfield_config(self, field, key):
        # Any syntactically valid international number works here.
        return '+49 351 3296645'
|
peak6/st2 | refs/heads/master | st2stream/tests/__init__.py | 12133432 | |
danithaca/berrypicking | refs/heads/master | django/advanced/advanced/__init__.py | 12133432 | |
hguemar/cinder | refs/heads/master | cinder/volume/targets/__init__.py | 12133432 | |
mrrrgn/build-mozharness | refs/heads/master | mozharness/lib/__init__.py | 12133432 | |
saeki-masaki/cinder | refs/heads/master | cinder/tests/unit/api/openstack/__init__.py | 12133432 | |
pybrain/pybrain | refs/heads/master | pybrain/tests/unittests/rl/__init__.py | 12133432 | |
eggsandbeer/scheduler | refs/heads/master | db/dao/__init__.py | 12133432 | |
MiltosD/CEFELRC | refs/heads/master | lib/python2.7/site-packages/django_jenkins/tasks/run_pep8.py | 3 | # -*- coding: utf-8 -*-
import re
import os
import sys
import pep8
from optparse import make_option
from django_jenkins.functions import relpath
from django_jenkins.tasks import BaseTask, get_apps_locations
class Task(BaseTask):
    """django-jenkins task running the pep8 style checker over the project.

    Findings go either to <output_dir>/pep8.report or to stdout, depending
    on the 'pep8_file_output' option, in a Jenkins-parseable format.
    """

    option_list = [make_option("--pep8-exclude",
                               dest="pep8-exclude", default=pep8.DEFAULT_EXCLUDE,
                               help="exclude files or directories which match these "
                                    "comma separated patterns (default: %s)" %
                                    pep8.DEFAULT_EXCLUDE),
                   make_option("--pep8-select", dest="pep8-select",
                               help="select errors and warnings (e.g. E,W6)"),
                   make_option("--pep8-ignore", dest="pep8-ignore",
                               help="skip errors and warnings (e.g. E4,W)")]

    def __init__(self, test_labels, options):
        super(Task, self).__init__(test_labels, options)
        self.test_all = options['test_all']
        if options.get('pep8_file_output', True):
            output_dir = options['output_dir']
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)
            self.output = open(os.path.join(output_dir, 'pep8.report'), 'w')
        else:
            self.output = sys.stdout
        # Translate the task options into pep8 command-line style options.
        self.pep8_options = ['--exclude=%s' % options['pep8-exclude']]
        if options['pep8-select']:
            self.pep8_options.append('--select=%s' % options['pep8-select'])
        if options['pep8-ignore']:
            self.pep8_options.append('--ignore=%s' % options['pep8-ignore'])

    def teardown_test_environment(self, **kwargs):
        """Run pep8 over the selected app locations and write the report."""
        locations = get_apps_locations(self.test_labels, self.test_all)
        pep8.process_options(self.pep8_options + locations)

        # run pep8 tool with captured output
        def report_error(instance, line_number, offset, text, check):
            code = text[:4]
            if pep8.ignore_code(code):
                return
            message = re.sub(r'([WE]\d+)', r'[\1] PEP8:', text)
            sourceline = instance.line_offset + line_number
            self.output.write('%s:%s: %s\n' % (instance.filename, sourceline, message))
        # NOTE(review): this monkey-patches pep8's Checker for the whole
        # process, not just this task.
        pep8.Checker.report_error = report_error

        for location in locations:
            pep8.input_dir(relpath(location), runner=pep8.input_file)
        self.output.close()
|
hiearth/langstyle | refs/heads/master | langstyle/entity/user.py | 1 | #!/usr/bin/env python
class User:
    """Plain data holder for a single user-account record."""

    def __init__(self, user_id=None, user_name=None, password=None,
                 email=None, language_map_id=None):
        # Map constructor arguments onto the public attribute names.
        self.id, self.name = user_id, user_name
        self.password, self.email = password, email
        self.language_map_id = language_map_id
proxysh/Safejumper-for-Mac | refs/heads/master | buildlinux/env32/lib/python2.7/site-packages/Crypto/Random/_UserFriendlyRNG.py | 111 | # -*- coding: utf-8 -*-
#
# Random/_UserFriendlyRNG.py : A user-friendly random number generator
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
__revision__ = "$Id$"
import sys
if sys.version_info[0] == 2 and sys.version_info[1] == 1:
from Crypto.Util.py21compat import *
import os
import threading
import struct
import time
from math import floor
from Crypto.Random import OSRNG
from Crypto.Random.Fortuna import FortunaAccumulator
class _EntropySource(object):
def __init__(self, accumulator, src_num):
self._fortuna = accumulator
self._src_num = src_num
self._pool_num = 0
def feed(self, data):
self._fortuna.add_random_event(self._src_num, self._pool_num, data)
self._pool_num = (self._pool_num + 1) & 31
class _EntropyCollector(object):
    """Gathers entropy from the OS RNG and the system clocks and feeds it
    into a Fortuna accumulator via three separately-numbered sources."""

    def __init__(self, accumulator):
        self._osrng = OSRNG.new()
        # Distinct source numbers keep the three streams separated in Fortuna.
        self._osrng_es = _EntropySource(accumulator, 255)
        self._time_es = _EntropySource(accumulator, 254)
        self._clock_es = _EntropySource(accumulator, 253)

    def reinit(self):
        # Add 256 bits to each of the 32 pools, twice. (For a total of 16384
        # bits collected from the operating system.)
        for i in range(2):
            block = self._osrng.read(32*32)
            for p in range(32):
                self._osrng_es.feed(block[p*32:(p+1)*32])
            block = None
        self._osrng.flush()

    def collect(self):
        """Feed a small, quick batch of fresh entropy into the accumulator."""
        # Collect 64 bits of entropy from the operating system and feed it to Fortuna.
        self._osrng_es.feed(self._osrng.read(8))

        # Add the fractional part of time.time()
        t = time.time()
        self._time_es.feed(struct.pack("@I", int(2**30 * (t - floor(t)))))

        # Add the fractional part of time.clock()
        # NOTE(review): time.clock() was removed in Python 3.8; this module
        # targets Python 2 (see the py21compat import at the top of the file).
        t = time.clock()
        self._clock_es.feed(struct.pack("@I", int(2**30 * (t - floor(t)))))
class _UserFriendlyRNG(object):
    """Fortuna-backed RNG with a file-like interface that reseeds from OS
    entropy and guards against state reuse across os.fork()."""

    def __init__(self):
        self.closed = False
        self._fa = FortunaAccumulator.FortunaAccumulator()
        self._ec = _EntropyCollector(self._fa)
        self.reinit()

    def reinit(self):
        """Initialize the random number generator and seed it with entropy from
        the operating system.
        """
        # Save the pid (helps ensure that Crypto.Random.atfork() gets called)
        self._pid = os.getpid()

        # Collect entropy from the operating system and feed it to
        # FortunaAccumulator
        self._ec.reinit()

        # Override FortunaAccumulator's 100ms minimum re-seed interval.  This
        # is necessary to avoid a race condition between this function and
        # self.read(), which that can otherwise cause forked child processes to
        # produce identical output.  (e.g. CVE-2013-1445)
        #
        # Note that if this function can be called frequently by an attacker,
        # (and if the bits from OSRNG are insufficiently random) it will weaken
        # Fortuna's ability to resist a state compromise extension attack.
        self._fa._forget_last_reseed()

    def close(self):
        # Mark closed and drop references so further reads fail loudly.
        self.closed = True
        self._osrng = None
        self._fa = None

    def flush(self):
        # File-like no-op; nothing is buffered.
        pass

    def read(self, N):
        """Return N bytes from the RNG."""
        if self.closed:
            raise ValueError("I/O operation on closed file")
        if not isinstance(N, (long, int)):  # `long` exists on Python 2 only
            raise TypeError("an integer is required")
        if N < 0:
            raise ValueError("cannot read to end of infinite stream")

        # Collect some entropy and feed it to Fortuna
        self._ec.collect()

        # Ask Fortuna to generate some bytes
        retval = self._fa.random_data(N)

        # Check that we haven't forked in the meantime.  (If we have, we don't
        # want to use the data, because it might have been duplicated in the
        # parent process.
        self._check_pid()

        # Return the random data.
        return retval

    def _check_pid(self):
        # Lame fork detection to remind developers to invoke Random.atfork()
        # after every call to os.fork().  Note that this check is not reliable,
        # since process IDs can be reused on most operating systems.
        #
        # You need to do Random.atfork() in the child process after every call
        # to os.fork() to avoid reusing PRNG state.  If you want to avoid
        # leaking PRNG state to child processes (for example, if you are using
        # os.setuid()) then you should also invoke Random.atfork() in the
        # *parent* process.
        if os.getpid() != self._pid:
            raise AssertionError("PID check failed. RNG must be re-initialized after fork(). Hint: Try Random.atfork()")
class _LockingUserFriendlyRNG(_UserFriendlyRNG):
    """Thread-safe variant: serializes close/reinit/read behind one lock."""

    def __init__(self):
        self._lock = threading.Lock()
        _UserFriendlyRNG.__init__(self)

    def close(self):
        # `with` is equivalent to the acquire/try/finally/release idiom.
        with self._lock:
            return _UserFriendlyRNG.close(self)

    def reinit(self):
        with self._lock:
            return _UserFriendlyRNG.reinit(self)

    def read(self, bytes):
        with self._lock:
            return _UserFriendlyRNG.read(self, bytes)
class RNGFile(object):
    """File-like facade over the shared RNG singleton.

    Closing an RNGFile only closes this handle, never the singleton.
    """

    def __init__(self, singleton):
        self.closed = False
        self._singleton = singleton

    # PEP 343: Support for the "with" statement
    def __enter__(self):
        """PEP 343 support.

        BUG FIX: must return the file object itself so that
        `with new() as f:` binds a usable handle (previously returned None).
        """
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """PEP 343 support.

        BUG FIX: __exit__ must accept the three exception-info arguments;
        without them every `with` block exit raised TypeError.
        """
        self.close()

    def close(self):
        # Don't actually close the singleton, just close this RNGFile instance.
        self.closed = True
        self._singleton = None

    def read(self, bytes):
        """Return *bytes* random bytes from the underlying singleton.

        :raises ValueError: if this handle has been closed.
        """
        if self.closed:
            raise ValueError("I/O operation on closed file")
        return self._singleton.read(bytes)

    def flush(self):
        if self.closed:
            raise ValueError("I/O operation on closed file")
# Process-wide RNG instance, created lazily under _singleton_lock.
_singleton_lock = threading.Lock()
_singleton = None

def _get_singleton():
    """Return the shared locking RNG, creating it on first use."""
    global _singleton
    with _singleton_lock:
        if _singleton is None:
            _singleton = _LockingUserFriendlyRNG()
        return _singleton
def new():
    """Return a new file-like handle onto the shared RNG singleton."""
    return RNGFile(_get_singleton())
def reinit():
    """Re-seed the shared RNG from OS entropy (e.g. after os.fork())."""
    _get_singleton().reinit()
def get_random_bytes(n):
    """Return the specified number of cryptographically-strong random bytes."""
    return _get_singleton().read(n)
# vim:set ts=4 sw=4 sts=4 expandtab:
|
LorenzReinhart/ONOSnew | refs/heads/master | tools/test/topos/newFuncTopo.py | 29 | #!/usr/bin/python
"""
Custom topology for Mininet
"""
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.node import Host, RemoteController
from mininet.node import Node
from mininet.node import CPULimitedHost
from mininet.link import TCLink
from mininet.cli import CLI
from mininet.log import setLogLevel
from mininet.util import dumpNodeConnections
from mininet.node import ( UserSwitch, OVSSwitch, IVSSwitch )
class VLANHost( Host ):
    """Mininet host whose default interface is moved onto an 802.1Q VLAN."""

    def config( self, vlan=100, **params ):
        # vlan: VLAN ID to tag this host's traffic with.
        r = super( Host, self ).config( **params )
        intf = self.defaultIntf()
        # Strip the IP from the raw interface, create the tagged
        # sub-interface, and re-assign the IP to it instead.
        self.cmd( 'ifconfig %s inet 0' % intf )
        self.cmd( 'vconfig add %s %d' % ( intf, vlan ) )
        self.cmd( 'ifconfig %s.%d inet %s' % ( intf, vlan, params['ip'] ) )
        # Rename so Mininet's bookkeeping points at the VLAN sub-interface.
        newName = '%s.%d' % ( intf, vlan )
        intf.name = newName
        self.nameToIntf[ newName ] = intf
        return r
class IPv6Host( Host ):
    """Mininet host configured with an IPv6 address instead of IPv4."""

    def config( self, v6Addr='1000:1/64', **params ):
        r = super( Host, self ).config( **params )
        intf = self.defaultIntf()
        # Drop the IPv4 address and assign the requested IPv6 address.
        self.cmd( 'ifconfig %s inet 0' % intf )
        self.cmd( 'ip -6 addr add %s dev %s' % ( v6Addr, intf ) )
        return r
class dualStackHost( Host ):
    """Mininet host carrying both its IPv4 address and an extra IPv6 one."""

    def config( self, v6Addr='2000:1/64', **params ):
        r = super( Host, self ).config( **params )
        intf = self.defaultIntf()
        # Keep the IPv4 config from the base class; add IPv6 on top.
        self.cmd( 'ip -6 addr add %s dev %s' % ( v6Addr, intf ) )
        return r
class MyTopo( Topo ):
    """Seven-switch test topology: a meshed core (s1-s4) with edge
    switches s5-s7, each hosting IPv4, IPv6, dual-stack, VLAN, VPN and
    multicast hosts."""

    def __init__( self ):
        # Initialize topology
        Topo.__init__( self )
        # Switch S5 Hosts
        host1=self.addHost( 'h1', ip='10.1.0.2/24' )
        host2=self.addHost( 'h2', cls=IPv6Host, v6Addr='1000::2/64' )
        host3=self.addHost( 'h3', ip='10.1.0.3/24', cls=dualStackHost, v6Addr='2000::2/64' )
        #VLAN hosts
        host4=self.addHost( 'h4', ip='100.1.0.2/24', cls=VLANHost, vlan=100 )
        host5=self.addHost( 'h5', ip='200.1.0.2/24', cls=VLANHost, vlan=200 )
        #VPN-1 and VPN-2 Hosts
        host6=self.addHost( 'h6', ip='11.1.0.2/24' )
        host7=self.addHost( 'h7', ip='12.1.0.2/24' )
        #Multicast Sender
        host8=self.addHost( 'h8', ip='10.1.0.4/24' )
        # Switch S6 Hosts
        host9=self.addHost( 'h9', ip='10.1.0.5/24' )
        host10=self.addHost( 'h10', cls=IPv6Host, v6Addr='1000::3/64' )
        host11=self.addHost( 'h11', ip='10.1.0.6/24', cls=dualStackHost, v6Addr='2000::3/64' )
        #VLAN hosts
        host12=self.addHost( 'h12', ip='100.1.0.3/24', cls=VLANHost, vlan=100 )
        host13=self.addHost( 'h13', ip='200.1.0.3/24', cls=VLANHost, vlan=200 )
        #VPN-1 and VPN-2 Hosts
        host14=self.addHost( 'h14', ip='11.1.0.3/24' )
        host15=self.addHost( 'h15', ip='12.1.0.3/24' )
        #Multicast Receiver
        host16=self.addHost( 'h16', ip='10.1.0.7/24' )
        # Switch S7 Hosts
        host17=self.addHost( 'h17', ip='10.1.0.8/24' )
        host18=self.addHost( 'h18', cls=IPv6Host, v6Addr='1000::4/64' )
        host19=self.addHost( 'h19', ip='10.1.0.9/24', cls=dualStackHost, v6Addr='2000::4/64' )
        #VLAN hosts
        host20=self.addHost( 'h20', ip='100.1.0.4/24', cls=VLANHost, vlan=100 )
        host21=self.addHost( 'h21', ip='200.1.0.4/24', cls=VLANHost, vlan=200 )
        #VPN-1 and VPN-2 Hosts
        host22=self.addHost( 'h22', ip='11.1.0.4/24' )
        host23=self.addHost( 'h23', ip='12.1.0.4/24' )
        #Multicast Receiver
        host24=self.addHost( 'h24', ip='10.1.0.10/24' )

        # Core switches s1-s4; edge switches s5-s7 carry the hosts.
        s1 = self.addSwitch( 's1' )
        s2 = self.addSwitch( 's2' )
        s3 = self.addSwitch( 's3' )
        s4 = self.addSwitch( 's4' )
        s5 = self.addSwitch( 's5' )
        s6 = self.addSwitch( 's6' )
        s7 = self.addSwitch( 's7' )

        # Edge links: eight hosts per edge switch.
        self.addLink(s5,host1)
        self.addLink(s5,host2)
        self.addLink(s5,host3)
        self.addLink(s5,host4)
        self.addLink(s5,host5)
        self.addLink(s5,host6)
        self.addLink(s5,host7)
        self.addLink(s5,host8)
        self.addLink(s6,host9)
        self.addLink(s6,host10)
        self.addLink(s6,host11)
        self.addLink(s6,host12)
        self.addLink(s6,host13)
        self.addLink(s6,host14)
        self.addLink(s6,host15)
        self.addLink(s6,host16)
        self.addLink(s7,host17)
        self.addLink(s7,host18)
        self.addLink(s7,host19)
        self.addLink(s7,host20)
        self.addLink(s7,host21)
        self.addLink(s7,host22)
        self.addLink(s7,host23)
        self.addLink(s7,host24)

        # Core mesh plus uplinks to the edge switches.
        self.addLink(s1,s2)
        self.addLink(s1,s3)
        self.addLink(s1,s4)
        self.addLink(s1,s5)
        self.addLink(s2,s3)
        self.addLink(s2,s5)
        self.addLink(s2,s6)
        self.addLink(s3,s4)
        self.addLink(s3,s6)
        self.addLink(s4,s7)
# Registry consumed by `mn --custom <this file> --topo mytopo`.
topos = { 'mytopo': ( lambda: MyTopo() ) }

# HERE THE CODE DEFINITION OF THE TOPOLOGY ENDS
def setupNetwork():
    "Create network"
    topo = MyTopo()
    # autoSetMacs gives deterministic MACs; controller=None because an
    # external controller is expected to be attached separately.
    network = Mininet(topo=topo, autoSetMacs=True, controller=None)
    network.start()
    # Drop into the interactive Mininet CLI; stop the net when it exits.
    CLI( network )
    network.stop()
if __name__ == '__main__':
    # Run the topology interactively when executed as a script.
    setLogLevel('info')
    #setLogLevel('debug')
    setupNetwork()
|
newvem/pytz | refs/heads/master | pytz/zoneinfo/HST.py | 9 | '''tzinfo timezone information for HST.'''
from pytz.tzinfo import StaticTzInfo
from pytz.tzinfo import memorized_timedelta as timedelta
class HST(StaticTzInfo):
    '''HST timezone definition. See datetime.tzinfo for details'''
    zone = 'HST'
    # Hawaii Standard Time: fixed UTC-10:00, no daylight saving.
    _utcoffset = timedelta(seconds=-36000)
    _tzname = 'HST'

# Module-level singleton instance exported by pytz.
HST = HST()
|
hlin117/statsmodels | refs/heads/master | statsmodels/tools/print_version.py | 23 | #!/usr/bin/env python
from __future__ import print_function
from statsmodels.compat.python import reduce
import sys
from os.path import dirname
def safe_version(module, attr='__version__'):
    """Fetch a (possibly nested) version attribute from *module*.

    *attr* may be a single attribute name or a list of names followed in
    sequence (e.g. ``['version', 'version']``).  Returns the string
    "Cannot detect version" when any attribute along the chain is missing.
    """
    attrs = attr if isinstance(attr, list) else [attr]
    try:
        return reduce(getattr, [module] + attrs)
    except AttributeError:
        return "Cannot detect version"
def _show_versions_only():
    """Print versions of statsmodels and its (optional) dependencies,
    without installation directories.  Every probe is best-effort: a
    missing package is reported as "Not installed" rather than raising.
    """
    print("\nINSTALLED VERSIONS")
    print("------------------")
    print("Python: %d.%d.%d.%s.%s" % sys.version_info[:])
    try:
        import os
        (sysname, nodename, release, version, machine) = os.uname()
        print("OS: %s %s %s %s" % (sysname, release, version, machine))
        print("byteorder: %s" % sys.byteorder)
        print("LC_ALL: %s" % os.environ.get('LC_ALL', "None"))
        print("LANG: %s" % os.environ.get('LANG', "None"))
    except Exception:
        # BUG FIX: was a bare `except:`, which would also swallow
        # SystemExit/KeyboardInterrupt.  Platform info (os.uname is absent
        # on some platforms) stays deliberately best-effort.
        pass

    try:
        from statsmodels import version
        has_sm = True
    except ImportError:
        has_sm = False

    print('\nStatsmodels\n===========\n')
    if has_sm:
        print('Installed: %s' % safe_version(version, 'full_version'))
    else:
        print('Not installed')

    print("\nRequired Dependencies\n=====================\n")
    try:
        import Cython
        print("cython: %s" % safe_version(Cython))
    except ImportError:
        print("cython: Not installed")

    try:
        import numpy
        print("numpy: %s" % safe_version(numpy, ['version', 'version']))
    except ImportError:
        print("numpy: Not installed")

    try:
        import scipy
        print("scipy: %s" % safe_version(scipy, ['version', 'version']))
    except ImportError:
        print("scipy: Not installed")

    try:
        import pandas
        print("pandas: %s" % safe_version(pandas, ['version', 'version']))
    except ImportError:
        print("pandas: Not installed")

    try:
        import dateutil
        print("    dateutil: %s" % safe_version(dateutil))
    except ImportError:
        print("    dateutil: not installed")

    try:
        import patsy
        print("patsy: %s" % safe_version(patsy))
    except ImportError:
        print("patsy: Not installed")

    print("\nOptional Dependencies\n=====================\n")
    try:
        import matplotlib as mpl
        print("matplotlib: %s" % safe_version(mpl))
    except ImportError:
        print("matplotlib: Not installed")

    try:
        from cvxopt import info
        print("cvxopt: %s" % safe_version(info, 'version'))
    except ImportError:
        print("cvxopt: Not installed")

    print("\nDeveloper Tools\n================\n")
    try:
        import IPython
        print("IPython: %s" % safe_version(IPython))
    except ImportError:
        print("IPython: Not installed")
    try:
        import jinja2
        print("    jinja2: %s" % safe_version(jinja2))
    except ImportError:
        print("    jinja2: Not installed")

    try:
        import sphinx
        print("sphinx: %s" % safe_version(sphinx))
    except ImportError:
        print("sphinx: Not installed")
    try:
        import pygments
        print("    pygments: %s" % safe_version(pygments))
    except ImportError:
        print("    pygments: Not installed")

    try:
        import nose
        print("nose: %s" % safe_version(nose))
    except ImportError:
        print("nose: Not installed")

    try:
        import virtualenv
        print("virtualenv: %s" % safe_version(virtualenv))
    except ImportError:
        print("virtualenv: Not installed")

    print("\n")
def show_versions(show_dirs=True):
    """Print versions of statsmodels and its dependencies.

    Parameters
    ----------
    show_dirs : bool
        If True (default), also print each package's installation
        directory.  If False, print the short listing only.
    """
    if not show_dirs:
        _show_versions_only()
        # BUG FIX: previously fell through and printed the full listing a
        # second time (with directories); return after the short listing.
        return
    print("\nINSTALLED VERSIONS")
    print("------------------")
    print("Python: %d.%d.%d.%s.%s" % sys.version_info[:])
    try:
        import os
        (sysname, nodename, release, version, machine) = os.uname()
        print("OS: %s %s %s %s" % (sysname, release, version, machine))
        print("byteorder: %s" % sys.byteorder)
        print("LC_ALL: %s" % os.environ.get('LC_ALL', "None"))
        print("LANG: %s" % os.environ.get('LANG', "None"))
    except Exception:
        # BUG FIX: was a bare `except:` (would swallow SystemExit and
        # KeyboardInterrupt); platform info stays best-effort.
        pass

    try:
        import statsmodels
        from statsmodels import version
        has_sm = True
    except ImportError:
        has_sm = False

    print('\nStatsmodels\n===========\n')
    if has_sm:
        print('Installed: %s (%s)' % (safe_version(version, 'full_version'),
                                      dirname(statsmodels.__file__)))
    else:
        print('Not installed')

    print("\nRequired Dependencies\n=====================\n")
    try:
        import Cython
        print("cython: %s (%s)" % (safe_version(Cython),
                                   dirname(Cython.__file__)))
    except ImportError:
        print("cython: Not installed")

    try:
        import numpy
        print("numpy: %s (%s)" % (safe_version(numpy, ['version', 'version']),
                                  dirname(numpy.__file__)))
    except ImportError:
        print("numpy: Not installed")

    try:
        import scipy
        print("scipy: %s (%s)" % (safe_version(scipy, ['version', 'version']),
                                  dirname(scipy.__file__)))
    except ImportError:
        print("scipy: Not installed")

    try:
        import pandas
        print("pandas: %s (%s)" % (safe_version(pandas, ['version',
                                                         'version']),
                                   dirname(pandas.__file__)))
    except ImportError:
        print("pandas: Not installed")

    try:
        import dateutil
        print("    dateutil: %s (%s)" % (safe_version(dateutil),
                                         dirname(dateutil.__file__)))
    except ImportError:
        print("    dateutil: not installed")

    try:
        import patsy
        print("patsy: %s (%s)" % (safe_version(patsy),
                                  dirname(patsy.__file__)))
    except ImportError:
        print("patsy: Not installed")

    print("\nOptional Dependencies\n=====================\n")
    try:
        import matplotlib as mpl
        print("matplotlib: %s (%s)" % (safe_version(mpl),
                                       dirname(mpl.__file__)))
    except ImportError:
        print("matplotlib: Not installed")

    try:
        from cvxopt import info
        print("cvxopt: %s (%s)" % (safe_version(info, 'version'),
                                   dirname(info.__file__)))
    except ImportError:
        print("cvxopt: Not installed")

    print("\nDeveloper Tools\n================\n")
    try:
        import IPython
        print("IPython: %s (%s)" % (safe_version(IPython),
                                    dirname(IPython.__file__)))
    except ImportError:
        print("IPython: Not installed")
    try:
        import jinja2
        print("    jinja2: %s (%s)" % (safe_version(jinja2),
                                       dirname(jinja2.__file__)))
    except ImportError:
        print("    jinja2: Not installed")

    try:
        import sphinx
        print("sphinx: %s (%s)" % (safe_version(sphinx),
                                   dirname(sphinx.__file__)))
    except ImportError:
        print("sphinx: Not installed")
    try:
        import pygments
        print("    pygments: %s (%s)" % (safe_version(pygments),
                                         dirname(pygments.__file__)))
    except ImportError:
        print("    pygments: Not installed")

    try:
        import nose
        print("nose: %s (%s)" % (safe_version(nose), dirname(nose.__file__)))
    except ImportError:
        print("nose: Not installed")

    try:
        import virtualenv
        print("virtualenv: %s (%s)" % (safe_version(virtualenv),
                                       dirname(virtualenv.__file__)))
    except ImportError:
        print("virtualenv: Not installed")

    print("\n")
if __name__ == "__main__":
show_versions()
|
tempbottle/pykafka | refs/heads/master | pykafka/client.py | 2 | """
Author: Keith Bourgoin, Emmett Butler
"""
__license__ = """
Copyright 2015 Parse.ly, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__all__ = ["KafkaClient"]
import handlers
import logging
from cluster import Cluster
try:
import rd_kafka
except ImportError:
rd_kafka = None
log = logging.getLogger(__name__)
class KafkaClient(object):
    """
    A high-level pythonic client for Kafka
    """
    def __init__(self,
                 hosts='127.0.0.1:9092',
                 use_greenlets=False,
                 socket_timeout_ms=30 * 1000,
                 offsets_channel_socket_timeout_ms=10 * 1000,
                 ignore_rdkafka=False,
                 exclude_internal_topics=True):
        """Create a connection to a Kafka cluster.

        :param hosts: Comma-separated list of kafka hosts to used to connect.
        :type hosts: str
        :param use_greenlets: If True, use gevent instead of threading.
        :type use_greenlets: bool
        :param socket_timeout_ms: The socket timeout (in milliseconds) for
            network requests
        :type socket_timeout_ms: int
        :param offsets_channel_socket_timeout_ms: The socket timeout (in
            milliseconds) when reading responses for offset commit and
            offset fetch requests.
        :type offsets_channel_socket_timeout_ms: int
        :param ignore_rdkafka: Don't use rdkafka, even if installed.
        :type ignore_rdkafka: bool
        :param exclude_internal_topics: Whether messages from internal topics
            (specifically, the offsets topic) should be exposed to the consumer.
        :type exclude_internal_topics: bool
        """
        self._seed_hosts = hosts
        self._socket_timeout_ms = socket_timeout_ms
        self._offsets_channel_socket_timeout_ms = offsets_channel_socket_timeout_ms
        # When use_greenlets is True the handler is left as None;
        # presumably Cluster then builds a gevent-based handler -- verify.
        self._handler = None if use_greenlets else handlers.ThreadingHandler()
        # Prefer the rd_kafka C extension when importable and not ignored.
        self._use_rdkafka = rd_kafka and not ignore_rdkafka
        if self._use_rdkafka:
            log.info('Using rd_kafka extensions.')
            # rd_kafka support is not implemented yet.
            raise NotImplementedError('Not yet.')
        else:
            self.cluster = Cluster(
                self._seed_hosts,
                self._handler,
                socket_timeout_ms=self._socket_timeout_ms,
                offsets_channel_socket_timeout_ms=self._offsets_channel_socket_timeout_ms,
                exclude_internal_topics=exclude_internal_topics
            )
        # Convenience aliases onto the cluster's live broker/topic maps.
        self.brokers = self.cluster.brokers
        self.topics = self.cluster.topics

    def __repr__(self):
        return "<{module}.{name} at {id_} (hosts={hosts})>".format(
            module=self.__class__.__module__,
            name=self.__class__.__name__,
            id_=hex(id(self)),
            hosts=self._seed_hosts,
        )

    def update_cluster(self):
        """Update known brokers and topics.

        Updates each Topic and Broker, adding new ones as found,
        with current metadata from the cluster.
        """
        self.cluster.update()
|
sloe/metric | refs/heads/metric | models/5_debug.py | 1 |
# web2py model file: `request` is a framework-provided global here.
# Enable module auto-reloading only when serving from a local dev host.
if request.env.HTTP_HOST.split(':')[0] in ('127.0.0.1', 'localhost'):
    # Reload changed modules in development deployments
    from gluon.custom_import import track_changes
    track_changes(True)
|
NatKarmios/beam-interactive-python-unofficial | refs/heads/master | beam_interactive_unofficial/exceptions.py | 1 | class InvalidAuthenticationError(ValueError):
"""Raised if the auth details you pass into '.start()' are incorrectly formatted."""
pass
class ConnectionFailedError(Exception):
    """Raised if the connection to Beam fails - this is often due to your auth details being incorrect."""
    pass
class ClientNotConnectedError(Exception):
    """Raised if a method is called on a BeamInteractiveClient when it is not connected."""

    def __init__(self):
        # Initialize through the base class rather than assigning to
        # self.args directly, so str(e) and pickling behave normally.
        Exception.__init__(self, "The client must be connected to do that!")
|
nazo/ansible | refs/heads/devel | test/runner/lib/classification.py | 23 | """Classify changes in Ansible code."""
from __future__ import absolute_import, print_function
import os
import time
from lib.target import (
walk_module_targets,
walk_integration_targets,
walk_units_targets,
walk_compile_targets,
walk_sanity_targets,
load_integration_prefixes,
)
from lib.util import (
display,
)
from lib.import_analysis import (
get_python_module_utils_imports,
)
def categorize_changes(paths, verbose_command=None):
    """Map a set of changed file paths to the test commands/targets to run.

    :type paths: list[str]
    :type verbose_command: str
    :rtype paths: dict[str, list[str]]
    """
    mapper = PathMapper()

    # One entry per test command; each collects target names (or 'all').
    commands = {
        'sanity': set(),
        'compile': set(),
        'units': set(),
        'integration': set(),
        'windows-integration': set(),
        'network-integration': set(),
    }

    # Expand each changed path to the files that depend on it
    # (e.g. module_utils changes pull in every module that imports them).
    additional_paths = set()

    for path in paths:
        dependent_paths = mapper.get_dependent_paths(path)

        if not dependent_paths:
            continue

        display.info('Expanded "%s" to %d dependent file(s):' % (path, len(dependent_paths)), verbosity=1)

        for dependent_path in dependent_paths:
            display.info(dependent_path, verbosity=1)
            additional_paths.add(dependent_path)

    additional_paths -= set(paths)  # don't count changed paths as additional paths

    if additional_paths:
        display.info('Expanded %d changed file(s) into %d additional dependent file(s).' % (len(paths), len(additional_paths)))
        paths = sorted(set(paths) | additional_paths)

    display.info('Mapping %d changed file(s) to tests.' % len(paths))

    for path in paths:
        tests = mapper.classify(path)

        if tests is None:
            display.info('%s -> all' % path, verbosity=1)
            tests = all_tests()  # not categorized, run all tests
            display.warning('Path not categorized: %s' % path)
        else:
            # Drop command entries whose target resolved to None/empty.
            tests = dict((key, value) for key, value in tests.items() if value)

        if verbose_command:
            result = '%s: %s' % (verbose_command, tests.get(verbose_command) or 'none')

            # identify targeted integration tests (those which only target a single integration command)
            if 'integration' in verbose_command and tests.get(verbose_command):
                if not any('integration' in command for command in tests.keys() if command != verbose_command):
                    result += ' (targeted)'
        else:
            result = '%s' % tests

        display.info('%s -> %s' % (path, result), verbosity=1)

        for command, target in tests.items():
            commands[command].add(target)

    # An 'all' entry for a command supersedes any individual targets.
    for command in commands:
        if any(t == 'all' for t in commands[command]):
            commands[command] = set(['all'])

    # Drop empty commands; return sorted target lists for stable output.
    commands = dict((c, sorted(commands[c])) for c in commands.keys() if commands[c])

    return commands
class PathMapper(object):
    """Map file paths to test commands and targets."""
    def __init__(self):
        # Snapshot all known targets once; classification is lookup-only.
        self.integration_targets = list(walk_integration_targets())
        self.module_targets = list(walk_module_targets())
        self.compile_targets = list(walk_compile_targets())
        self.units_targets = list(walk_units_targets())
        self.sanity_targets = list(walk_sanity_targets())

        self.compile_paths = set(t.path for t in self.compile_targets)
        self.units_modules = set(t.module for t in self.units_targets if t.module)
        self.units_paths = set(a for t in self.units_targets for a in t.aliases)
        self.sanity_paths = set(t.path for t in self.sanity_targets)

        self.module_names_by_path = dict((t.path, t.module) for t in self.module_targets)
        self.integration_targets_by_name = dict((t.name, t) for t in self.integration_targets)
        self.integration_targets_by_alias = dict((a, t) for t in self.integration_targets for a in t.aliases)

        # Reverse indexes: module name -> integration target covering it,
        # split by the platform alias the target carries.
        self.posix_integration_by_module = dict((m, t.name) for t in self.integration_targets
                                                if 'posix/' in t.aliases for m in t.modules)
        self.windows_integration_by_module = dict((m, t.name) for t in self.integration_targets
                                                  if 'windows/' in t.aliases for m in t.modules)
        self.network_integration_by_module = dict((m, t.name) for t in self.integration_targets
                                                  if 'network/' in t.aliases for m in t.modules)

        self.prefixes = load_integration_prefixes()

        self.python_module_utils_imports = {}  # populated on first use to reduce overhead when not needed

    def get_dependent_paths(self, path):
        """Return the paths whose behavior depends on *path*.

        :type path: str
        :rtype: list[str]
        """
        ext = os.path.splitext(os.path.split(path)[1])[1]

        # Only python module_utils changes fan out to dependent files.
        if path.startswith('lib/ansible/module_utils/'):
            if ext == '.py':
                return self.get_python_module_utils_usage(path)

        return []

    def get_python_module_utils_usage(self, path):
        """Return every file that imports the given module_utils file.

        :type path: str
        :rtype: list[str]
        """
        if path == 'lib/ansible/module_utils/__init__.py':
            return []

        # The import graph is expensive to build, so do it lazily and once.
        if not self.python_module_utils_imports:
            display.info('Analyzing python module_utils imports...')
            before = time.time()
            self.python_module_utils_imports = get_python_module_utils_imports(self.compile_targets)
            after = time.time()
            display.info('Processed %d python module_utils in %d second(s).' % (len(self.python_module_utils_imports), after - before))

        # 'lib/ansible/module_utils/x.py' -> 'ansible.module_utils.x'
        # (strip the extension, dot the path, drop the leading 'lib.').
        name = os.path.splitext(path)[0].replace('/', '.')[4:]

        if name.endswith('.__init__'):
            name = name[:-9]

        return sorted(self.python_module_utils_imports[name])

    def classify(self, path):
        """Classify *path*, then add implied compile/sanity entries.

        :type path: str
        :rtype: dict[str, str] | None
        """
        result = self._classify(path)

        # run all tests when no result given
        if result is None:
            return None

        # compile path if eligible
        if path in self.compile_paths:
            result['compile'] = path

        # run sanity on path unless result specified otherwise
        if path in self.sanity_paths and 'sanity' not in result:
            result['sanity'] = path

        return result

    def _classify(self, path):
        """Decision table mapping a path prefix/extension to test targets.
        Order matters: the first matching rule wins.

        :type path: str
        :rtype: dict[str, str] | None
        """
        filename = os.path.basename(path)
        name, ext = os.path.splitext(filename)

        # Empty dict = "no tests needed" (still gets compile/sanity added
        # by classify() when eligible).
        minimal = {}

        if path.startswith('.github/'):
            return minimal

        if path.startswith('bin/'):
            return minimal

        if path.startswith('contrib/'):
            return {
                'units': 'test/units/contrib/'
            }

        if path.startswith('docs/'):
            return minimal

        if path.startswith('examples/'):
            if path == 'examples/scripts/ConfigureRemotingForAnsible.ps1':
                return {
                    'windows-integration': 'connection_winrm',
                }

            return minimal

        if path.startswith('hacking/'):
            return minimal

        if path.startswith('lib/ansible/modules/'):
            module = self.module_names_by_path.get(path)

            if module:
                return {
                    'units': module if module in self.units_modules else None,
                    'integration': self.posix_integration_by_module.get(module) if ext == '.py' else None,
                    'windows-integration': self.windows_integration_by_module.get(module) if ext == '.ps1' else None,
                    'network-integration': self.network_integration_by_module.get(module),
                }

            return minimal

        if path.startswith('lib/ansible/module_utils/'):
            if ext == '.ps1':
                return {
                    'windows-integration': 'all',
                }

            if ext == '.py':
                return minimal  # already expanded using get_dependent_paths

        if path.startswith('lib/ansible/plugins/connection/'):
            if name == '__init__':
                return {
                    'integration': 'all',
                    'windows-integration': 'all',
                    'network-integration': 'all',
                    'units': 'test/units/plugins/connection/',
                }

            units_path = 'test/units/plugins/connection/test_%s.py' % name

            if units_path not in self.units_paths:
                units_path = None

            integration_name = 'connection_%s' % name

            if integration_name not in self.integration_targets_by_name:
                integration_name = None

            # entire integration test commands depend on these connection plugins

            if name == 'winrm':
                return {
                    'windows-integration': 'all',
                    'units': units_path,
                }

            if name == 'local':
                return {
                    'integration': 'all',
                    'network-integration': 'all',
                    'units': units_path,
                }

            if name == 'network_cli':
                return {
                    'network-integration': 'all',
                    'units': units_path,
                }

            # other connection plugins have isolated integration and unit tests

            return {
                'integration': integration_name,
                'units': units_path,
            }

        if path.startswith('lib/ansible/plugins/terminal/'):
            if ext == '.py':
                if name in self.prefixes and self.prefixes[name] == 'network':
                    network_target = 'network/%s/' % name

                    if network_target in self.integration_targets_by_alias:
                        return {
                            'network-integration': network_target,
                            'units': 'all',
                        }

                    display.warning('Integration tests for "%s" not found.' % network_target)

                    return {
                        'units': 'all',
                    }

            return {
                'network-integration': 'all',
                'units': 'all',
            }

        if path.startswith('lib/ansible/utils/module_docs_fragments/'):
            return {
                'sanity': 'all',
            }

        if path.startswith('lib/ansible/'):
            return all_tests()  # broad impact, run all tests

        if path.startswith('packaging/'):
            return minimal

        if path.startswith('test/compile/'):
            return {
                'compile': 'all',
            }

        if path.startswith('test/results/'):
            return minimal

        if path.startswith('test/integration/roles/'):
            return minimal

        if path.startswith('test/integration/targets/'):
            # Deleted target directories need no testing of their own.
            if not os.path.exists(path):
                return minimal

            target = self.integration_targets_by_name[path.split('/')[3]]

            if 'hidden/' in target.aliases:
                return {
                    'integration': 'all',
                    'windows-integration': 'all',
                    'network-integration': 'all',
                }

            return {
                'integration': target.name if 'posix/' in target.aliases else None,
                'windows-integration': target.name if 'windows/' in target.aliases else None,
                'network-integration': target.name if 'network/' in target.aliases else None,
            }

        if path.startswith('test/integration/'):
            return {
                'integration': 'all',
                'windows-integration': 'all',
                'network-integration': 'all',
            }

        if path.startswith('test/sanity/'):
            return {
                'sanity': 'all',  # test infrastructure, run all sanity checks
            }

        if path.startswith('test/units/'):
            if path in self.units_paths:
                return {
                    'units': path,
                }

            # changes to files which are not unit tests should trigger tests from the nearest parent directory
            test_path = os.path.dirname(path)

            while test_path:
                if test_path + '/' in self.units_paths:
                    return {
                        'units': test_path + '/',
                    }

                test_path = os.path.dirname(test_path)

        if path.startswith('test/runner/'):
            return all_tests()  # test infrastructure, run all tests

        if path.startswith('test/utils/shippable/'):
            return all_tests()  # test infrastructure, run all tests

        if path.startswith('test/utils/'):
            return minimal

        if path == 'test/README.md':
            return minimal

        if path.startswith('ticket_stubs/'):
            return minimal

        if '/' not in path:
            if path in (
                    '.gitattributes',
                    '.gitignore',
                    '.gitmodules',
                    '.mailmap',
                    'tox.ini',  # obsolete
                    'COPYING',
                    'VERSION',
                    'Makefile',
                    'setup.py',
            ):
                return minimal

            if path in (
                    'shippable.yml',
                    '.coveragerc',
            ):
                return all_tests()  # test infrastructure, run all tests

            if path == '.yamllint':
                return {
                    'sanity': 'all',
                }

        # Documentation-style files anywhere else require no testing.
        if ext in ('.md', '.rst', '.txt', '.xml', '.in'):
            return minimal

        return None  # unknown, will result in fall-back to run all tests
def all_tests():
    """Return the mapping that makes every test command run everything.

    :rtype: dict[str, str]
    """
    test_commands = (
        'sanity',
        'compile',
        'units',
        'integration',
        'windows-integration',
        'network-integration',
    )
    # Every command gets the same 'all' target.
    return dict.fromkeys(test_commands, 'all')
|
Conjuror/fxos-certsuite | refs/heads/master | mcts/web-platform-tests/tests/tools/pywebsocket/src/test/test_stream_hixie75.py | 496 | #!/usr/bin/env python
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for stream module."""
import unittest
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from mod_pywebsocket.stream import StreamHixie75
from test.test_msgutil import _create_request_hixie75
class StreamHixie75Test(unittest.TestCase):
    """A unittest for StreamHixie75 class."""

    def test_payload_length(self):
        """Check decoding of Hixie-75 variable-length payload headers.

        Each case pairs an expected length with the raw header octets
        that encode it (7 bits per octet, continuation bit set on all
        but the last octet).
        """
        # Loop variable renamed from 'bytes': it shadowed the builtin.
        for length, data in ((0, '\x00'), (0x7f, '\x7f'), (0x80, '\x81\x00'),
                             (0x1234, '\x80\xa4\x34')):
            test_stream = StreamHixie75(_create_request_hixie75(data))
            self.assertEqual(
                length, test_stream._read_payload_length_hixie75())
if __name__ == '__main__':
unittest.main()
# vi:sts=4 sw=4 et
|
pavelgolik/netr | refs/heads/master | nn.py | 2 | #!/usr/bin/env python
# This file is part of netr.
#
# netr is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License 3 as published by
# the Free Software Foundation.
#
# netr is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with netr. If not, see <http://www.gnu.org/licenses/>.
import sys, numpy, math, logging, os, random, ConfigParser
from progressbar import *
from metrics import Metric
from data import Data
class NN:
    """A fully-connected feed-forward neural network trained with plain
    backpropagation, momentum, and a globally adapted learning rate.

    All hyper-parameters come from a ConfigParser-style *config* object
    with sections "Architecture", "Factors", "Thresholds" and "Output".
    """

    def __init__(self, config):
        self.config = config
        # network architecture
        self.nLayer = config.getint("Architecture", "layer")
        self.nNodes = config.getint("Architecture", "nodes")
        self.nIter = config.getint("Architecture", "iterations")
        # learning factors
        self.etha = config.getfloat("Factors", "initlearnrate")
        self.alpha = config.getfloat("Factors", "momentum")
        self.steepness = config.getfloat("Factors", "steepness")
        self.stepsizedec = config.getfloat("Factors", "stepsizedec")
        self.stepsizeinc = config.getfloat("Factors", "stepsizeinc")
        self.offset = config.getfloat("Factors", "initoffset")
        # stagnation thresholds for the learning-rate adaptation
        self.mindpp = config.getfloat("Thresholds", "mindpp")
        self.mindsse = config.getfloat("Thresholds", "mindsse")
        self.mindsumweights = config.getfloat("Thresholds", "mindsumweights")
        self.actfunc = config.get("Architecture", "activation")
        self.weightsinit = config.get("Architecture", "initweights")
        self.errfct = config.get("Architecture", "errorfunction")
        self.metrics = Metric(config.get("Output", "metrics"), config.getint("Output", "metricsclass"))
        self.verbosity = config.getint("Output", "verbosity")
        self.interactive = config.getboolean("Output", "interactive")
        self.weights = []   # one weight matrix per layer
        self.outs = []      # per layer: (net input, activation) of last forward pass
        self.deltas = []    # per layer: error terms of last backward pass
        self.generateActivationFunction()

    ##############################################################################
    def generateActivationFunction(self):
        """Bind self.phi (activation) and self.dphi (its derivative) from
        self.actfunc; exits the process on an unknown name."""
        if self.actfunc == "logistic":
            def dphi(net):
                r = 1.0/(1.0+numpy.exp(-net * self.steepness))
                return numpy.multiply( r, (1.0-r) )
            self.phi = lambda net: 1.0/(1.0+numpy.exp(-net * self.steepness))
            self.dphi = dphi
        elif self.actfunc == "tanh":
            self.phi = lambda net: numpy.tanh(self.steepness * net)
            self.dphi = lambda net: self.steepness * (1.0-numpy.power(numpy.tanh(net), 2))
        elif self.actfunc == "linear":
            self.phi = lambda net: self.steepness * net
            self.dphi = lambda net: self.steepness
        elif self.actfunc == "softmax":
            def phi(net):
                s = 1.0/numpy.exp(-net).sum()
                return s * numpy.exp(-net)
            # BUG FIX: previously read "self.phi = foo", which raised a
            # NameError as soon as the softmax activation was selected.
            self.phi = phi
            def dphi(net):
                r = self.phi(net)
                return numpy.multiply( r, (1.0-r) )
            self.dphi = dphi
        elif self.actfunc == "gauss":
            self.phi = lambda net: numpy.exp(-numpy.power(net-1,2) * self.steepness)
            # NOTE(review): derivative omits the steepness factor both inside
            # the exponent and as an outer factor -- confirm this is intended.
            self.dphi= lambda net: -2*numpy.multiply(net-1, numpy.exp(-numpy.power(net-1,2)))
        elif self.actfunc == "sin":
            self.phi = lambda net: numpy.sin(self.steepness * net)
            self.dphi= lambda net: self.steepness * numpy.cos(self.steepness * net)
        else:
            logging.error("Unknown activation function. Available: logistic, tanh, linear, softmax, gauss, sin")
            sys.exit(-1)

    ##############################################################################
    def reload(self, config, weights):
        """Re-initialize from *config* and install pre-trained *weights*."""
        self.__init__(config)
        self.weights = weights

    ##############################################################################
    def initWeights(self, cls, feat):
        """Allocate and initialize the weight matrices for a network with
        *feat* input features and *cls* output classes."""
        self.nIn = feat
        self.nOut = cls
        # Renamed from "initWeights" -- the inner helper shadowed this method.
        def buildLayers( generateMatrixFunc ):
            self.weights.append( generateMatrixFunc(self.nIn, self.nNodes) )
            for i in range(1, self.nLayer):
                self.weights.append( generateMatrixFunc(self.nNodes, self.nNodes) )
            self.weights.append( generateMatrixFunc(self.nNodes, self.nOut) )
        if self.weightsinit == "randuni":
            def mat(n,m): return self.offset * (numpy.mat(numpy.random.rand(n, m)) + 0.5)
        elif self.weightsinit == "randgauss":
            def mat(n,m): return self.offset * numpy.mat(numpy.random.standard_normal( [n, m] ))
        elif self.weightsinit == "uniform":
            def mat(n,m): return self.offset * numpy.mat(numpy.ones( [n, m] ))
        elif self.weightsinit == "exponential":
            def mat(n,m): return self.offset * numpy.mat(numpy.random.standard_exponential( size=[n, m] ))
        else:
            logging.error("Unknown weights initialization. Available: randuni, randgauss, uniform, exponential")
            sys.exit(-1)
        buildLayers(mat)
        from copy import copy
        self.lastchange = copy(self.weights)   # momentum buffer
        self.outs = [None] * (self.nLayer + 1)
        self.deltas = [None] * (self.nLayer + 1)

    ##############################################################################
    def test(self, data):
        """Classify *data*; return (confusion matrix, error rate, scores)."""
        conf = numpy.zeros([self.nOut, self.nOut], numpy.int16)
        allprobs = [ None ] * len(data)
        for i,row in enumerate(data):
            allprobs[i] = self.passForward(row)
            conf[ data.targets[i], allprobs[i].argmax() ] += 1
            #TODO: not needed?
            allprobs[i] /= allprobs[i].sum()
        return conf, 1-conf.trace()/float(conf.sum()), allprobs

    ##############################################################################
    def passForward(self, row):
        """Propagate one input *row* through all layers; return the output
        activations.  Caches (net, activation) per layer for backprop."""
        # input layer ('net' renamed from 'sum', which shadowed the builtin)
        net = row * self.weights[0]
        self.outs[0] = (net, self.phi(net))
        # next layers
        for w in range( 1, self.nLayer+1 ):
            net = self.outs[w-1][1] * self.weights[w]
            self.outs[w] = (net, self.phi(net))
        return self.outs[-1][1][0]

    ##############################################################################
    def train(self, data):
        """Run up to self.nIter epochs of online backpropagation on *data*
        and return the per-row output scores of the last epoch."""
        sse = sys.maxint    # Python 2: "infinitely bad" previous error
        pp = sys.maxint     # NOTE(review): pp is never updated below -- confirm
        self.initWeights( data.cls, data.feat )
        interactive = self.interactive and os.isatty(sys.stdout.fileno())
        ref = numpy.zeros( [1,self.nOut] )   # reusable one-hot target row
        c_old = 0
        allprobs = [None] * len(data)
        for i in range(self.nIter):
            conf = numpy.zeros( [ self.nOut, self.nOut ] )
            sumold = sse
            ppold = pp
            sse = 0.0
            sce = 0.0
            if interactive: pbar = ProgressBar(maxval=len(data)).start()
            for k,row in enumerate(data):
                probs = self.passForward(row)
                # update the one-hot target in place (cheaper than rebuilding)
                ref[0,c_old] = 0
                ref[0,data.targets[k]] = 1
                c_old = data.targets[k]
                diff = ref-probs
                if self.errfct == "sse":
                    self.deltas[-1] = numpy.multiply( diff, self.dphi( probs ) )
                    sse += numpy.power(diff, 2).sum()
                elif self.errfct == "sce":
                    self.deltas[-1] = diff * self.steepness
                    # cross entropy: 1/C * sum{ (tk*log(yk)) + (1-tk)*log(1-yk) }
                    sce -= ((numpy.multiply(ref, numpy.log( probs )) + numpy.multiply((1-ref), numpy.log( 1 - probs )))).sum() / self.nOut
                weightschange = self.passBackward(row)
                if interactive: pbar.update(k)
                # train statistics
                c_pred = probs.argmax()
                conf[ data.targets[k], c_pred ] += 1
                allprobs[k] = probs
            #conf_, err, tepr = self.test( testdata )
            #conf_, err, tepr = self.test( data )
            output = self.metrics.obtain( data, allprobs, conf, 1-conf.trace()/float(conf.sum()) )
            if self.errfct == "sse":
                output["errfct"] = "SSE: % 6.4f" % sse
            elif self.errfct == "sce":
                output["errfct"] = "SCE: % 6.4f" % sce
            if interactive: pbar.finish()
            metrics = "%(lift)s%(pp)s%(fscore)s%(tester)s%(auc)s" % output
            logging.warning("iter: % 4d er: %.6f %s rate: %.4f%s", i+1, 1-conf.trace()/conf.sum(), output["errfct"], self.etha, metrics)
            #for i in range(len(self.weights)):
            # print "pruned:", (numpy.abs(self.weights[i])<0.1).sum()
            # self.weights[i][numpy.abs(self.weights[i])<0.1]=0
            #if weightschange < self.mindsumweights:
            # self.weights[-1] = self.weights[-1] + numpy.random.standard_normal([self.nNodes, self.nOut]) * 0.1
            # logging.warning("disturbing weights for leaving local optimum...")
            # adapt the global learning rate: shrink on stagnation, grow otherwise
            if sumold - sse < self.mindsse or ppold - pp < self.mindpp:
                self.etha *= self.stepsizedec
            else:
                self.etha *= self.stepsizeinc
        return allprobs

    ##############################################################################
    def passBackward(self, row):
        """One backpropagation step for the most recent forward pass of
        *row*; returns the summed weight change (stagnation signal)."""
        # precompute deltas for the inner layers
        for l in range(self.nLayer)[::-1]:
            self.deltas[l] = self.deltas[l+1] * self.weights[l+1].T
            self.deltas[l] = numpy.multiply( self.deltas[l], self.dphi(self.outs[l][0]) )
        #for i in range(self.nNodes):
        # self.deltas[l][i] = 0.0
        # for j in range(len(self.deltas[l+1])):
        # self.deltas[l][i] += self.deltas[l+1][j] * self.weights[l+1][i,j]
        # self.deltas[l][i] *= self.dphi( self.outs[l][i][0] )
        #self.etha *= (1-self.alpha)
        #output layer
        # NOTE(review): unlike the layers below, this update does not scale
        # the learning term by (1-alpha) -- confirm whether that is intended.
        delta = self.etha * numpy.outer( self.outs[-2][1], self.deltas[-1] ) + self.alpha * self.lastchange[-1]
        self.weights[-1] = self.weights[-1] + delta
        self.lastchange[-1] = delta
        #for j in range(self.nOut):
        # f = self.etha * self.deltas[-1][j]
        # for i in range( self.nNodes ):
        # self.weights[-1][i,j] += f * self.outs[-2][i][1]
        # recalculate weights forwards
        #inner layers
        for l in range( 1, self.nLayer ):
            #for j in range(self.nNodes):
            # f = self.etha * self.deltas[l][j]
            # for i in range (self.nNodes):
            # self.weights[l][i,j] += f * self.outs[l-1][i][1]
            delta = (1-self.alpha) * self.etha * numpy.outer( self.outs[l-1][1], self.deltas[l] ) + self.alpha * self.lastchange[l]
            self.weights[l] = self.weights[l] + delta
            self.lastchange[l] = delta
        # input vector once again influences w'
        #for j in range(self.nNodes):
        # f = self.etha * self.deltas[0][j]
        # for i in range(self.nIn):
        # self.weights[0][i,j] += f * row[i]
        delta = (1-self.alpha) * self.etha * numpy.outer( row, self.deltas[0] ) + self.alpha * self.lastchange[0]
        self.weights[0] = self.weights[0] + delta
        self.lastchange[0] = delta
        return sum( [ d.sum() for d in self.lastchange ] )

    ##############################################################################
    def savemodel(self, modelname):
        """Pickle (weights, config) to the file *modelname*."""
        import pickle
        model = ( self.weights, self.config )
        pickle.dump(model, open(modelname, "w"))

    def loadmodel(self, modelname):
        """Restore a model written by savemodel() and re-initialize."""
        import pickle
        # BUG FIX: used the Python-2-only builtin file(); open() is
        # equivalent there and also works on Python 3.
        self.weights, self.config = pickle.load(open(modelname))
        self.reload(self.config, self.weights)
##############################################################################
|
karstenw/nodebox-pyobjc | refs/heads/master | examples/Extended Application/matplotlib/examples/images_contours_and_fields/custom_cmap.py | 1 | """
=========================================
Creating a colormap from a list of colors
=========================================
Creating a colormap from a list of colors can be done with the `from_list`
method of `LinearSegmentedColormap`. You must pass a list of RGB tuples that
define the mixture of colors from 0 to 1.
Creating custom colormaps
-------------------------
It is also possible to create a custom mapping for a colormap. This is
accomplished by creating dictionary that specifies how the RGB channels
change from one end of the cmap to the other.
Example: suppose you want red to increase from 0 to 1 over the bottom
half, green to do the same over the middle half, and blue over the top
half. Then you would use::
cdict = {'red': ((0.0, 0.0, 0.0),
(0.5, 1.0, 1.0),
(1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.0),
(0.25, 0.0, 0.0),
(0.75, 1.0, 1.0),
(1.0, 1.0, 1.0)),
'blue': ((0.0, 0.0, 0.0),
(0.5, 0.0, 0.0),
(1.0, 1.0, 1.0))}
If, as in this example, there are no discontinuities in the r, g, and b
components, then it is quite simple: the second and third element of
each tuple, above, is the same--call it "y". The first element ("x")
defines interpolation intervals over the full range of 0 to 1, and it
must span that whole range. In other words, the values of x divide the
0-to-1 range into a set of segments, and y gives the end-point color
values for each segment.
Now consider the green. cdict['green'] is saying that for
0 <= x <= 0.25, y is zero; no green.
0.25 < x <= 0.75, y varies linearly from 0 to 1.
x > 0.75, y remains at 1, full green.
If there are discontinuities, then it is a little more complicated.
Label the 3 elements in each row in the cdict entry for a given color as
(x, y0, y1). Then for values of x between x[i] and x[i+1] the color
value is interpolated between y1[i] and y0[i+1].
Going back to the cookbook example, look at cdict['red']; because y0 !=
y1, it is saying that for x from 0 to 0.5, red increases from 0 to 1,
but then it jumps down, so that for x from 0.5 to 1, red increases from
0.7 to 1. Green ramps from 0 to 1 as x goes from 0 to 0.5, then jumps
back to 0, and ramps back to 1 as x goes from 0.5 to 1.::
row i: x y0 y1
/
/
row i+1: x y0 y1
Above is an attempt to show that for x in the range x[i] to x[i+1], the
interpolation is between y1[i] and y0[i+1]. So, y0[0] and y1[-1] are
never used.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
# nodebox section
# When this script is run inside NodeBox, figures are rendered into the
# NodeBox canvas instead of a GUI window.  size(), imagesize() and image()
# are presumably NodeBox builtins -- they are not defined in this file.
if __name__ == '__builtin__':
    # were in nodebox
    import os
    import tempfile
    W = 800
    inset = 20
    size(W, 600)
    plt.cla()
    plt.clf()
    plt.close('all')
    def tempimage():
        # Create a closed, named temporary PNG file and return its path.
        fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
        fname = fob.name
        fob.close()
        return fname
    imgx = 20
    imgy = 0
    def pltshow(plt, dpi=150):
        # Save the current figure to a temporary PNG and draw it on the
        # NodeBox canvas, stacking successive figures vertically.
        global imgx, imgy
        temppath = tempimage()
        plt.savefig(temppath, dpi=dpi)
        dx,dy = imagesize(temppath)
        w = min(W,dx)
        image(temppath,imgx,imgy,width=w)
        imgy = imgy + dy + 20
        os.remove(temppath)
        # NOTE(review): HEIGHT is presumably a NodeBox global -- confirm.
        size(W, HEIGHT+dy+40)
else:
    def pltshow(mplpyplot):
        # Plain Python: just open the usual matplotlib window.
        mplpyplot.show()
# nodebox section end
# Make some illustrative fake data:
x = np.arange(0, np.pi, 0.1)
y = np.arange(0, 2 * np.pi, 0.1)
X, Y = np.meshgrid(x, y)
Z = np.cos(X) * np.sin(Y) * 10

###############################################################################
# --- Colormaps from a list ---

colors = [(1, 0, 0), (0, 1, 0), (0, 0, 1)]  # R -> G -> B
n_bins = [3, 6, 10, 100]  # Discretizes the interpolation into bins
cmap_name = 'my_list'
fig, axs = plt.subplots(2, 2, figsize=(6, 9))
fig.subplots_adjust(left=0.02, bottom=0.06, right=0.95, top=0.94, wspace=0.05)
for n_bin, ax in zip(n_bins, axs.ravel()):
    # Create the colormap
    cm = LinearSegmentedColormap.from_list(
        cmap_name, colors, N=n_bin)
    # Fewer bins will result in "coarser" colormap interpolation
    im = ax.imshow(Z, interpolation='nearest', origin='lower', cmap=cm)
    ax.set_title("N bins: %s" % n_bin)
    fig.colorbar(im, ax=ax)

###############################################################################
# --- Custom colormaps ---

cdict1 = {'red':   ((0.0, 0.0, 0.0),
                    (0.5, 0.0, 0.1),
                    (1.0, 1.0, 1.0)),

          'green': ((0.0, 0.0, 0.0),
                    (1.0, 0.0, 0.0)),

          'blue':  ((0.0, 0.0, 1.0),
                    (0.5, 0.1, 0.0),
                    (1.0, 0.0, 0.0))
          }

cdict2 = {'red':   ((0.0, 0.0, 0.0),
                    (0.5, 0.0, 1.0),
                    (1.0, 0.1, 1.0)),

          'green': ((0.0, 0.0, 0.0),
                    (1.0, 0.0, 0.0)),

          'blue':  ((0.0, 0.0, 0.1),
                    (0.5, 1.0, 0.0),
                    (1.0, 0.0, 0.0))
          }

cdict3 = {'red':   ((0.0, 0.0, 0.0),
                    (0.25, 0.0, 0.0),
                    (0.5, 0.8, 1.0),
                    (0.75, 1.0, 1.0),
                    (1.0, 0.4, 1.0)),

          'green': ((0.0, 0.0, 0.0),
                    (0.25, 0.0, 0.0),
                    (0.5, 0.9, 0.9),
                    (0.75, 0.0, 0.0),
                    (1.0, 0.0, 0.0)),

          'blue':  ((0.0, 0.0, 0.4),
                    (0.25, 1.0, 1.0),
                    (0.5, 1.0, 0.8),
                    (0.75, 0.0, 0.0),
                    (1.0, 0.0, 0.0))
          }

# Make a modified version of cdict3 with some transparency
# in the middle of the range.
cdict4 = cdict3.copy()
cdict4['alpha'] = ((0.0, 1.0, 1.0),
                #   (0.25,1.0, 1.0),
                   (0.5, 0.3, 0.3),
                #   (0.75,1.0, 1.0),
                   (1.0, 1.0, 1.0))


###############################################################################
# Now we will use this example to illustrate 3 ways of
# handling custom colormaps.
# First, the most direct and explicit:

blue_red1 = LinearSegmentedColormap('BlueRed1', cdict1)

###############################################################################
# Second, create the map explicitly and register it.
# Like the first method, this method works with any kind
# of Colormap, not just
# a LinearSegmentedColormap:

blue_red2 = LinearSegmentedColormap('BlueRed2', cdict2)
plt.register_cmap(cmap=blue_red2)

###############################################################################
# Third, for LinearSegmentedColormap only,
# leave everything to register_cmap:

plt.register_cmap(name='BlueRed3', data=cdict3)  # optional lut kwarg
plt.register_cmap(name='BlueRedAlpha', data=cdict4)

###############################################################################
# Make the figure:

fig, axs = plt.subplots(2, 2, figsize=(6, 9))
fig.subplots_adjust(left=0.02, bottom=0.06, right=0.95, top=0.94, wspace=0.05)

# Make 4 subplots:

im1 = axs[0, 0].imshow(Z, interpolation='nearest', cmap=blue_red1)
fig.colorbar(im1, ax=axs[0, 0])

cmap = plt.get_cmap('BlueRed2')
im2 = axs[1, 0].imshow(Z, interpolation='nearest', cmap=cmap)
fig.colorbar(im2, ax=axs[1, 0])

# Now we will set the third cmap as the default.  One would
# not normally do this in the middle of a script like this;
# it is done here just to illustrate the method.

plt.rcParams['image.cmap'] = 'BlueRed3'

im3 = axs[0, 1].imshow(Z, interpolation='nearest')
fig.colorbar(im3, ax=axs[0, 1])
axs[0, 1].set_title("Alpha = 1")

# Or as yet another variation, we can replace the rcParams
# specification *before* the imshow with the following *after*
# imshow.
# This sets the new default *and* sets the colormap of the last
# image-like item plotted via pyplot, if any.
#

# Draw a line with low zorder so it will be behind the image.
axs[1, 1].plot([0, 10 * np.pi], [0, 20 * np.pi], color='c', lw=20, zorder=-1)

im4 = axs[1, 1].imshow(Z, interpolation='nearest')
fig.colorbar(im4, ax=axs[1, 1])

# Here it is: changing the colormap for the current image and its
# colorbar after they have been plotted.
im4.set_cmap('BlueRedAlpha')
axs[1, 1].set_title("Varying alpha")
#

fig.suptitle('Custom Blue-Red colormaps', fontsize=16)
fig.subplots_adjust(top=0.9)

pltshow(plt)
|
stuyCTF/stuyCTF-Platform | refs/heads/master | api/api/__init__.py | 1 | """
Imports and setup functionality
"""
import api.logger
import api.setup
import api.user
import api.team
import api.group
import api.annotations
import api.auth
import api.common
import api.cache
import api.problem
import api.stats
import api.utilities
import api.autogen
import api.autogen_tools
import api.admin
# MUST BE LAST
# api.config is imported after every other submodule -- presumably because
# it references them at import time; verify before reordering.
import api.config

# Side effect at package import time: ensure the MongoDB indexes exist.
api.setup.index_mongo()
|
adversary-org/pyme3 | refs/heads/master | examples/encrypt-to-all.py | 1 | #!/usr/bin/env python3
# $Id$
# Copyright (C) 2008 Igor Belyi <belyi@users.sourceforge.net>
# Copyright (C) 2002 John Goerzen <jgoerzen@complete.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
This program will try to encrypt a simple message to each key on your keyring.
If your keyring has any invalid keys on it, those keys will be removed
and it will re-try the encryption."""
from pyme import core
from pyme.core import Data, Context
from pyme.constants import validity
core.check_version(None)   # initialize the GPGME library (None = any version)

plain = Data('This is my message.')   # plaintext shared by every encryption below

c = Context()
c.set_armor(1)   # emit ASCII-armored ciphertext
def sendto(keylist):
    """Encrypt the module-level plaintext to every key in *keylist* and
    return the ASCII-armored ciphertext as a string."""
    ciphertext = Data()
    c.op_encrypt(keylist, 1, plain, ciphertext)
    # Rewind before reading the result back out.
    ciphertext.seek(0, 0)
    return ciphertext.read()
# Collect every key on the keyring that has at least one encryption-capable
# subkey, reporting each key as it is examined.
names = []
for key in c.op_keylist_all(None, 0):
    print(" *** Found key for %s" % key.uids[0].uid)
    valid = 0
    for subkey in key.subkeys:
        keyid = subkey.keyid
        # FIX: compare with None using 'is' (PEP 8), not '=='.
        if keyid is None:
            break
        can_encrypt = subkey.can_encrypt
        valid += can_encrypt
        print("    Subkey %s: encryption %s" % \
              (keyid, can_encrypt and "enabled" or "disabled"))
    if valid:
        names.append(key)
    else:
        print("    This key cannot be used for encryption; skipping.")

passno = 0   # unused; retained from the original example

print("Encrypting to %d recipients" % len(names))
print(sendto(names))
|
Michagogo/bitcoin | refs/heads/master | share/rpcuser/rpcuser.py | 115 | #!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import hashlib
import sys
import os
from random import SystemRandom
import base64
import hmac
if len(sys.argv) < 2:
    sys.stderr.write('Please include username as an argument.\n')
    sys.exit(0)

username = sys.argv[1]

#This uses os.urandom() underneath
cryptogen = SystemRandom()

# Create 16 byte hex salt.
# FIX: format each byte as zero-padded hex ('{0:02x}').  The previous
# hex()-slicing dropped leading zeros, so bytes < 0x10 contributed only one
# character and the salt had a variable, shortened length.
salt_sequence = [cryptogen.randrange(256) for i in range(16)]
salt = "".join(['{0:02x}'.format(x) for x in salt_sequence])

#Create 32 byte b64 password
password = base64.urlsafe_b64encode(os.urandom(32))

digestmod = hashlib.sha256

if sys.version_info.major >= 3:
    # hmac.new() on Python 3 wants str for the digestmod name and the
    # password printed below must be text, not bytes.
    password = password.decode('utf-8')
    digestmod = 'SHA256'

m = hmac.new(bytearray(salt, 'utf-8'), bytearray(password, 'utf-8'), digestmod)
result = m.hexdigest()

print("String to be appended to bitcoin.conf:")
print("rpcauth="+username+":"+salt+"$"+result)
print("Your password:\n"+password)
|
danuzclaudes/robottelo | refs/heads/master | tests/foreman/ui/test_dashboard.py | 4 | """Test module for Dashboard UI"""
from robottelo.decorators import stubbed, tier1, tier2
from robottelo.test import UITestCase
class DashboardTestCase(UITestCase):
    """Tests for Dashboard UI"""
    # Every case below is a stubbed manual-test placeholder (@stubbed()):
    # the body is only a docstring whose @Feature/@Steps/@Assert/@Status
    # tags are parsed by the robottelo test-plan tooling -- keep that
    # format intact when editing.

    @stubbed()
    @tier1
    def test_positive_save(self):
        """Save the Dashboard UI
        @Feature: Dashboard
        @Steps:
        1.Navigate to Monitor -> Dashboard
        2.Try to remove some widgets
        3.Select the Manage Dropdown box
        4.Save the Dashboard
        @Assert: Dashboard is saved successfully
        and the removed widgets does not appear.
        @Status: Manual
        """

    @stubbed()
    @tier1
    def test_positive_reset(self):
        """Reset the Dashboard to default UI
        @Feature: Dashboard
        @Steps:
        1.Navigate to Monitor -> Dashboard
        2.Try to remove some widgets
        3.Select the Manage Dropdown box
        4.Save the Dashboard
        5.Dashboard Widgets are saved successfully
        6.Click Reset to default
        @Assert: Widget positions successfully saved.
        @Status: Manual
        """

    @stubbed()
    @tier1
    def test_positive_add_widgets(self):
        """Add Widgets to the Dashboard UI
        @Feature: Dashboard
        @Steps:
        1.Navigate to Monitor -> Dashboard
        2.Select Manage Dropdown box
        3.Add Widgets
        @Assert: User is able to add widgets.
        @Status: Manual
        """

    @stubbed()
    @tier1
    def test_positive_search_bookmark(self):
        """Bookmark the search filter in Dashboard UI
        @Feature: Dashboard
        @Steps:
        1.Navigate to Monitor -> Dashboard
        2.Add a filter to search box (eg. environment)
        3.Bookmark the search filter
        @Assert: User is able to list the Bookmark
        @Status: Manual
        """

    @stubbed()
    @tier2
    def test_positive_host_configuration_status(self):
        """Check if the Host Configuration Status
        Widget links are working
        @Feature: Dashboard
        @Steps:
        1.Navigate to Monitor -> Dashboard
        2.Review the Host Configuration Status
        3.Navigate to each of the links which has
        search string associated with it.
        @Assert: Each link shows the right info
        @Status: Manual
        """

    @stubbed()
    @tier2
    def test_positive_host_configuration_chart(self):
        """Check if the Host Configuration Chart
        is working in the Dashboard UI
        @Feature: Dashboard
        @Steps:
        1.Navigate to Monitor -> Dashboard
        2.Review the Host Configuration Chart widget
        3.Navigate to each of the links which
        has search string associated with it.
        @Assert: Each link shows the right info
        @Status: Manual
        """

    @stubbed()
    @tier2
    def test_positive_task_status(self):
        """Check if the Task Status is
        working in the Dashboard UI
        @Feature: Dashboard
        @Steps:
        1.Navigate to Monitor -> Dashboard
        2.Review the Task Status widget
        3.Click each link
        @Assert: Each link shows the right info
        @Status: Manual
        """

    @stubbed()
    @tier2
    def test_positive_latest_warning_error_tasks(self):
        """Check if the Latest Warning/Error
        Tasks Status are working in the Dashboard UI
        @Feature: Dashboard
        @Steps:
        1.Navigate to Monitor -> Dashboard
        2.Review the Latest Warning/Error Tasks widget.
        @Assert: The links to all failed/warnings tasks are working
        @Status: Manual
        """

    @stubbed()
    @tier2
    def test_positive_content_view_history(self):
        """Check if the Content View History
        are working in the Dashboard UI
        @Feature: Dashboard
        @Steps:
        1.Navigate to Monitor -> Dashboard
        2.Review the Content View History widget
        @Assert: Each Content View link shows its current status
        (the environment to which it is published)
        @Status: Manual
        """

    @stubbed()
    @tier2
    def test_positive_discovered_hosts(self):
        """Check if the user can access Discovered
        Host Widget in the Dashboard UI
        @Feature: Dashboard
        @Steps:
        1.Navigate to Monitor -> Dashboard
        2.Review the Discovered Hosts widget
        3.Click on the list of Discovered Hosts
        @Assert: It takes you to discovered hosts
        @Status: Manual
        """

    @stubbed()
    @tier2
    def test_positive_latest_events_widget(self):
        """Check if the Latest Events Widget
        is working in the Dashboard UI
        @Feature: Dashboard
        @Steps:
        1.Navigate to Monitor -> Dashboard
        2.Review the Latest Events widget
        @Assert: The Widget is updated with
        all the latest events
        @Status: Manual
        """

    @stubbed()
    @tier1
    def test_positive_sync_overview_widget(self):
        """Check if the Sync Overview Widget
        is working in the Dashboard UI
        @Feature: Dashboard
        @Steps:
        1.Create a product
        2.Add a repo and sync it
        3.Navigate to Monitor -> Dashboard
        4.Review the Sync Overview widget
        for the above sync details
        @Assert: Sync Overview widget is
        updated with all sync processes
        @Status: Manual
        """

    @stubbed()
    @tier2
    def test_positive_content_host_subscription_status(self):
        """Check if the Content Host Subscription Status
        is working in the Dashboard UI
        @Feature: Dashboard
        @Steps:
        1.Register Content Host and subscribe it
        2.Navigate Monitor -> Dashboard
        3.Review the Content Host Subscription Status
        4.Click each link :
        a.Invalid Subscriptions
        b.Insufficient Subscriptions
        c.Current Subscriptions
        @Assert: The widget is updated with all details for Current,
        Invalid and Insufficient Subscriptions
        @Status: Manual
        """

    @stubbed()
    @tier1
    def test_positive_current_subscription_totals(self):
        """Check if the Current Subscriptions Totals widget
        is working in the Dashboard UI
        @Feature: Dashboard
        @Steps:
        1.Make sure sat6 has some active subscriptions
        2.Navigate to Monitor -> Dashboard
        3.Review the Current Subscription Total widget
        @Assert: The widget displays all the active
        subscriptions and expired subscriptions details
        @Status: Manual
        """

    @stubbed()
    @tier2
    def test_positive_host_collections(self):
        """Check if the Host Collections widget
        displays list of host collection in UI
        @Feature: Dashboard
        @Steps:
        1.Make sure to have some hosts and host collections
        2.Navigate Monitor -> Dashboard
        3.Review the Host Collections Widget
        @Assert: The list of host collections along
        with content host is displayed in the widget
        @Status: Manual
        """

    @stubbed()
    @tier2
    def test_positive_run_distribution_widget(self):
        """Check if the Run distribution widget is
        working in the Dashboard UI
        @Feature: Dashboard
        @Steps:
        1.Navigate Monitor -> Dashboard
        2.Review the Run Distribution
        in the last 30 minutes widget
        @Assert: The widget shows appropriate data
        @Status: Manual
        """

    @stubbed()
    @tier2
    def test_positive_latest_errata_widget(self):
        """Check if the Latest Errata widget is
        working in Dashboard the UI
        @Feature: Dashboard
        @Steps:
        1.Make sure you have applied some errata to content host
        2.Navigate Monitor -> Dashboard
        3.Review the Latest Errata widget
        @Assert: The widget is updated with
        all errata related details
        @Status: Manual
        """

    @stubbed()
    @tier1
    def test_positive_remove_widget(self):
        """Check if the user is able to remove widget
        in the Dashboard UI
        @Feature: Dashboard
        @Steps:
        1.Navigate to Monitor -> Dashboard
        2.Try to remove some widget
        @Assert: Widget is removed
        The widget is listed under Manage -> Add Widget
        @Status: Manual
        """

    @stubbed()
    @tier1
    def test_positive_add_removed_widget(self):
        """Check if the user is able to add removed
        widget in the Dashboard UI
        @Feature: Dashboard
        @Steps:
        1.Navigate to Monitor -> Dashboard
        2.Try to remove some widget
        3.Widget is removed
        4.The widget is listed under Manage -> Add Widget
        5.Click to add the widget back
        @Assert: The widget is added back to the Dashboard
        @Status: Manual
        """

    @stubbed()
    @tier1
    def test_positive_minimize_widget(self):
        """Check if the user is able to minimize the widget
        in the Dashboard UI
        @Feature: Dashboard
        @Steps:
        1.Navigate to Monitor -> Dashboard
        2.Try to minimize some widget
        @Assert: Widget is minimized
        The widget is listed under Manage -> Restore Widget
        @Status: Manual
        """

    @stubbed()
    @tier1
    def test_positive_restore_minimize_widget(self):
        """Check if the user is able to restoring the minimized
        widget in the Dashboard UI
        @Feature: Dashboard
        @Steps:
        1.Navigate to Monitor -> Dashboard
        2.Try to minimize some widget
        3.Widget is minimized
        4.The widget is listed
        under Manage -> Restore Widget
        5.Click to add the widget back
        @Assert: The widget is added
        back to the Dashboard
        @Status: Manual
        """

    @stubbed()
    @tier1
    def test_positive_toggle_auto_refresh(self):
        """Check if the user is able to Toggle
        Auto refresh in the Dashboard UI
        @Feature: Dashboard
        @Steps:
        1.Navigate to Monitor -> Dashboard
        2.Click Auto Refresh ON/OFF
        @Assert: The auto refresh functionality
        works as per the set value.
        @Status: Manual
        """

    @stubbed()
    @tier1
    def test_positive_search(self):
        """Check if the search box is working
        in the Dashboard UI
        @Feature: Dashboard
        @Steps:
        1.Navigate to Monitor -> Dashboard
        2.Add a filter to search box (eg. environment)
        @Assert: Data displayed according to search box
        @Status: Manual
        """

    @stubbed()
    @tier1
    def test_positive_clear_search_box(self):
        """Check if the user is able to clear the
        search box in the Dashboard UI
        @Feature: Dashboard
        @Steps:
        1.Navigate to Monitor -> Dashboard
        2.Add a filter to search box (eg. environment)
        3.Data displayed according to search box
        4.On left side of the box click
        the Clear cross sign
        @Assert: Search box is cleared
        @Status: Manual
        """
|
notforhire/Yup | refs/heads/master | share/qt/make_spinner.py | 563 | #!/usr/bin/env python
# W.J. van der Laan, 2011
# Make spinning animation from a .png
# Requires imagemagick 6.7+
from __future__ import division
from os import path
from PIL import Image
from subprocess import Popen

# Source image and output location/naming.
SRC = 'img/reload.png'
TMPDIR = '../../src/qt/res/movies/'
TMPNAME = 'spinner-%03i.png'
# Animation shape: number of frames, playback rate, output icon size.
NUMFRAMES = 35
FRAMERATE = 10.0
CONVERT = 'convert'
CLOCKWISE = True
DSIZE = (16, 16)

im_src = Image.open(SRC)

# Mirror the source so that negating the rotation angle below yields a
# clockwise spin.
if CLOCKWISE:
    im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT)


def frame_to_filename(frame):
    """Return the output path for the given frame number."""
    return path.join(TMPDIR, TMPNAME % frame)


frame_files = []
# range() instead of the Python-2-only xrange(): identical behavior on
# Python 2 for this small count, and works on Python 3 as well.
for frame in range(NUMFRAMES):
    # Each frame covers 1/NUMFRAMES of a revolution; sample at the middle
    # of the frame's interval (hence the +0.5).
    rotation = (frame + 0.5) / NUMFRAMES * 360.0
    if CLOCKWISE:
        rotation = -rotation
    im_new = im_src.rotate(rotation, Image.BICUBIC)
    im_new.thumbnail(DSIZE, Image.ANTIALIAS)
    outfile = frame_to_filename(frame)
    im_new.save(outfile, 'png')
    frame_files.append(outfile)
|
vgrem/Office365-REST-Python-Client | refs/heads/master | office365/projectserver/__init__.py | 12133432 | |
tbs1980/otkpp | refs/heads/master | pyotk/pyotk/resultstable.py | 1 | """Implements tools for printing result tables.
This module implements functions for printing statistical
information such as number of iterations, function iterations,
etc. about iteration results.
"""
from native import *
import sys
from testproblems import *
def print_table(T, num_rows, num_cols, column_widths, stream=None, print_mode=None):
    """Print table T (num_rows rows of num_cols cells) to stdout as an
    ASCII grid with '-' rules between rows and '|' column separators.

    column_widths gives the printed width of each column.  str cells are
    printed left-aligned, float cells with 3 decimals, and anything else
    as a decimal integer.

    NOTE(review): the stream and print_mode parameters are accepted but
    never used -- output always goes to sys.stdout.  (Python 2 code:
    uses the print statement.)
    """
    tr = 0  # index of the next row of T to print
    # Total interior width: each column plus one separator character.
    w = 0
    for ci in range(num_cols):
        w += column_widths[ci] + 1
    # 2*num_rows + 1 output lines: rule, row, rule, row, ..., rule.
    for ri in range(2*num_rows + 1):
        if ri % 2 == 0:
            # Horizontal rule line.
            for i in range(w+1):
                sys.stdout.write('-')
            print
        else:
            # Data row: one %-format per cell, chosen from the cell's type.
            sys.stdout.write('|')
            for ci in range(num_cols):
                format_str = "%-" + str(column_widths[ci])
                if type(T[tr][ci]) == str:
                    format_str += "s"
                elif type(T[tr][ci]) == float:
                    format_str += '.3f'
                else:
                    format_str += 'd'
                print format_str % T[tr][ci],
                sys.stdout.write('|')
            print
        # Advance to the next table row after every printed data line.
        if ri % 2 == 1:
            tr = tr + 1
def solver_results_table(S, f, x0, sc, max_num_iter):
    """For a given set of solvers S and the given test function f,
    print a table of statistical information about each solver.

    The starting point and stopping criterion are specified with the
    parameters x0 and sc, respectively.  The results table contains the
    following information for each solver on termination:
    - number of iterations
    - number of function evaluations
    - number of function evaluations per iteration
    - number of gradient evaluations
    - number of gradient evaluations per iteration

    NOTE(review): max_num_iter is accepted but never passed to the
    solvers; termination appears to be controlled solely by sc -- confirm.
    """
    num_rows = len(S) + 1  # one header row plus one row per solver
    num_cols = 6
    # Header row.
    R = []
    R.append([])
    R[0].append("Algorithm")
    R[0].append("i")
    R[0].append("f")
    R[0].append("f/i")
    R[0].append("g")
    R[0].append("g/i")
    # Pre-fill data rows with empty cells.
    # NOTE(review): this appends num_rows rows after the header, one more
    # than the len(S) rows filled below; the last empty row is never
    # printed because print_table only prints num_rows rows.
    for ri in range(num_rows):
        R.append([])
        for ci in range(num_cols):
            R[ri+1].append("")
    column_widths = (30, 6, 6, 6, 6, 6)
    for si in range(len(S)):
        #results = minimize(S[si], DefaultSolverSetup(),
        #f, sc, x0, NoConstraints(), 0, False)
        results = S[si].solve(f, x0, sc, Solver.DefaultSetup(), NoConstraints(), False)
        R[si+1][0] = S[si].get_name()
        R[si+1][1] = results.num_iter
        R[si+1][2] = results.num_func_eval
        # Per-iteration averages; 1.0* forces float division on Python 2.
        R[si+1][3] = 1.0*results.num_func_eval / results.num_iter
        R[si+1][4] = results.num_grad_eval
        R[si+1][5] = 1.0*results.num_grad_eval / results.num_iter
    print_table(R, num_rows, num_cols, column_widths)
def main():
    """Demo entry point: compare several solvers on a 6-dimensional
    extended Rosenbrock problem and print the statistics table."""
    gsl_variants = ('vector_bfgs', 'vector_bfgs2', 'conjugate_fr',
                    'conjugate_pr')
    solvers = [DSQA()]
    solvers.extend(GSLfdfsolver(variant) for variant in gsl_variants)
    problem = ExtendedRosenbrock(n=6)
    solver_results_table(solvers, problem.otk_instance, problem.x0,
                         problem.stopcrit, 10000)
if __name__ == "__main__":
main()
|
lsinfo/odoo | refs/heads/8.0 | openerp/addons/test_access_rights/tests/__init__.py | 404 | import test_ir_rules
|
konstruktoid/ansible-upstream | refs/heads/devel | test/units/modules/network/dellos6/test_dellos6_config.py | 46 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests.mock import patch
from ansible.modules.network.dellos6 import dellos6_config
from units.modules.utils import set_module_args
from .dellos6_module import TestDellos6Module, load_fixture
class TestDellos6ConfigModule(TestDellos6Module):
    """Unit tests for the dellos6_config Ansible module.

    The transport helpers (get_config/load_config/run_commands) are
    patched in setUp so no device connection is made; the running
    configuration is served from fixture files via load_fixtures.
    """

    module = dellos6_config

    def setUp(self):
        super(TestDellos6ConfigModule, self).setUp()
        # Patch the module's transport helpers; keep both the patcher
        # (to stop it in tearDown) and the started mock (for assertions).
        self.mock_get_config = patch('ansible.modules.network.dellos6.dellos6_config.get_config')
        self.get_config = self.mock_get_config.start()
        self.mock_load_config = patch('ansible.modules.network.dellos6.dellos6_config.load_config')
        self.load_config = self.mock_load_config.start()
        self.mock_run_commands = patch('ansible.modules.network.dellos6.dellos6_config.run_commands')
        self.run_commands = self.mock_run_commands.start()

    def tearDown(self):
        super(TestDellos6ConfigModule, self).tearDown()
        self.mock_get_config.stop()
        self.mock_load_config.stop()
        self.mock_run_commands.stop()

    def load_fixtures(self, commands=None):
        # Serve the canned running-config; loading is a no-op.
        config_file = 'dellos6_config_config.cfg'
        self.get_config.return_value = load_fixture(config_file)
        self.load_config.return_value = None

    def test_dellos6_config_unchanged(self):
        # Pushing the current running-config must report no change.
        src = load_fixture('dellos6_config_config.cfg')
        set_module_args(dict(src=src))
        self.execute_module()

    def test_dellos6_config_src(self):
        src = load_fixture('dellos6_config_src.cfg')
        set_module_args(dict(src=src))
        commands = ['hostname foo', 'exit', 'interface Te1/0/2',
                    'shutdown', 'exit']
        self.execute_module(changed=True, commands=commands)

    def test_dellos6_config_backup(self):
        set_module_args(dict(backup=True))
        result = self.execute_module()
        self.assertIn('__backup__', result)

    def test_dellos6_config_save(self):
        # save=True must issue exactly one copy command and never touch
        # the configuration itself.
        set_module_args(dict(save=True))
        self.execute_module(changed=True)
        self.assertEqual(self.run_commands.call_count, 1)
        self.assertEqual(self.get_config.call_count, 0)
        self.assertEqual(self.load_config.call_count, 0)
        args = self.run_commands.call_args[0][1]
        self.assertDictContainsSubset({'command': 'copy running-config startup-config'}, args[0])

    def test_dellos6_config_lines_wo_parents(self):
        set_module_args(dict(lines=['hostname foo']))
        commands = ['hostname foo']
        self.execute_module(changed=True, commands=commands)

    def test_dellos6_config_lines_w_parents(self):
        set_module_args(dict(lines=['description "teest"', 'exit'], parents=['interface Te1/0/2']))
        commands = ['interface Te1/0/2', 'description "teest"', 'exit']
        self.execute_module(changed=True, commands=commands)

    def test_dellos6_config_before(self):
        set_module_args(dict(lines=['hostname foo'], before=['snmp-server contact bar']))
        commands = ['snmp-server contact bar', 'hostname foo']
        self.execute_module(changed=True, commands=commands, sort=False)

    def test_dellos6_config_after(self):
        set_module_args(dict(lines=['hostname foo'], after=['snmp-server contact bar']))
        commands = ['hostname foo', 'snmp-server contact bar']
        self.execute_module(changed=True, commands=commands, sort=False)

    def test_dellos6_config_before_after_no_change(self):
        # before/after must not be emitted when the lines already match.
        set_module_args(dict(lines=['hostname router'],
                             before=['snmp-server contact bar'],
                             after=['snmp-server location chennai']))
        self.execute_module()

    def test_dellos6_config_config(self):
        config = 'hostname localhost'
        set_module_args(dict(lines=['hostname router'], config=config))
        commands = ['hostname router']
        self.execute_module(changed=True, commands=commands)

    def test_dellos6_config_replace_block(self):
        lines = ['description test string', 'shutdown']
        parents = ['interface Te1/0/2']
        set_module_args(dict(lines=lines, replace='block', parents=parents))
        commands = parents + lines
        self.execute_module(changed=True, commands=commands)

    def test_dellos6_config_match_none(self):
        lines = ['hostname router']
        set_module_args(dict(lines=lines, match='none'))
        self.execute_module(changed=True, commands=lines)

    def test_dellos6_config_match_none_parents(self):
        # BUGFIX: this method was a second "test_dellos6_config_match_none"
        # definition, which rebound the class attribute and silently
        # prevented the first variant from ever running.  Renamed so both
        # tests execute.
        lines = ['description test string', 'shutdown']
        parents = ['interface Te1/0/2']
        set_module_args(dict(lines=lines, parents=parents, match='none'))
        commands = parents + lines
        self.execute_module(changed=True, commands=commands, sort=False)

    def test_dellos6_config_match_strict(self):
        lines = ['description "test_string"',
                 'shutdown']
        parents = ['interface Te1/0/1']
        set_module_args(dict(lines=lines, parents=parents, match='strict'))
        commands = parents + ['shutdown']
        self.execute_module(changed=True, commands=commands, sort=False)

    def test_dellos6_config_match_exact(self):
        lines = ['description test_string', 'shutdown']
        parents = ['interface Te1/0/1']
        set_module_args(dict(lines=lines, parents=parents, match='exact'))
        commands = parents + lines
        self.execute_module(changed=True, commands=commands, sort=False)
|
nzavagli/UnrealPy | refs/heads/master | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Python-2.7.10/Lib/plat-mac/Carbon/Sndihooks.py | 82 | from _Sndihooks import *
|
mSenyor/sl4a | refs/heads/master | python/src/Mac/Modules/file/filesupport.py | 34 | # This script generates a Python interface for an Apple Macintosh Manager.
# It uses the "bgen" package to generate C code.
# The function specifications are generated by scanning the mamager's header file,
# using the "scantools" package (customized for this particular manager).
#
# XXXX TO DO:
# - Implement correct missing FSSpec handling for Alias methods
# - Implement FInfo
#
# WARNING WARNING WARNING
# The file _Filemodule.c was modified manually, don't run this script
# unless you really know what you're doing.
import sys
sys.exit(42)
import string
# Declarations that change for each manager
#MACHEADERFILE = 'Files.h' # The Apple header file
MODNAME = '_File' # The name of the module
LONGMODNAME = 'Carbon.File' # The "normal" external name of the module
# The following is *usually* unchanged but may still require tuning
MODPREFIX = 'File' # The prefix for module-wide routines
INPUTFILE = string.lower(MODPREFIX) + 'gen.py' # The file generated by the scanner
OUTPUTFILE = MODNAME + "module.c" # The file generated by this program
from macsupport import *
# Various integers:
SInt64 = Type("SInt64", "L")
UInt64 = Type("UInt64", "L")
FNMessage = Type("FNMessage", "l")
FSAllocationFlags = Type("FSAllocationFlags", "H")
FSCatalogInfoBitmap = Type("FSCatalogInfoBitmap", "l")
FSIteratorFlags = Type("FSIteratorFlags", "l")
FSVolumeRefNum = Type("FSVolumeRefNum", "h")
AliasInfoType = Type("AliasInfoType", "h")
# Various types of strings:
#class UniCharCountBuffer(InputOnlyType):
# pass
class VarReverseInputBufferType(ReverseInputBufferMixin, VarInputBufferType):
    """Variable-size input buffer type; the mixin presumably reverses the
    (buffer, length) argument order for the C call -- see bgen/macsupport."""
    pass
FullPathName = VarReverseInputBufferType()
ConstStr31Param = OpaqueArrayType("Str31", "PyMac_BuildStr255", "PyMac_GetStr255")
ConstStr32Param = OpaqueArrayType("Str32", "PyMac_BuildStr255", "PyMac_GetStr255")
ConstStr63Param = OpaqueArrayType("Str63", "PyMac_BuildStr255", "PyMac_GetStr255")
Str63 = OpaqueArrayType("Str63", "PyMac_BuildStr255", "PyMac_GetStr255")
HFSUniStr255 = OpaqueType("HFSUniStr255", "PyMac_BuildHFSUniStr255", "PyMac_GetHFSUniStr255")
UInt8_ptr = InputOnlyType("UInt8 *", "s")
# Other types:
class OptionalFSxxxType(OpaqueByValueType):
    """Opaque FSSpec/FSRef parameter that the caller may pass as None.

    Declares both a stack buffer and a pointer to it, so the converter
    (myPyMac_GetOptFSSpecPtr/myPyMac_GetOptFSRefPtr) can set the pointer
    to NULL when None is passed.
    """

    def declare(self, name):
        Output("%s %s__buf__;", self.typeName, name)
        Output("%s *%s = &%s__buf__;", self.typeName, name, name)
class FSCatalogInfoAndBitmapType(InputOnlyType):
    """Combined (FSCatalogInfoBitmap, FSCatalogInfo) argument pair.

    Parsed from Python as a (bitmap, object) tuple and passed to the
    Carbon call as "bitmap, &info".
    """

    def __init__(self):
        InputOnlyType.__init__(self, "BUG", "BUG")

    def declare(self, name):
        Output("PyObject *%s__object = NULL;", name)
        Output("FSCatalogInfoBitmap %s__bitmap = 0;", name)
        Output("FSCatalogInfo %s;", name)

    def getargsFormat(self):
        return "lO"

    def getargsArgs(self, name):
        return "%s__bitmap, %s__object"%(name, name)

    def getargsCheck(self, name):
        Output("if (!convert_FSCatalogInfo(%s__object, %s__bitmap, &%s)) return NULL;", name, name, name)

    def passInput(self, name):
        return "%s__bitmap, &%s"% (name, name)

    def passOutput(self, name):
        return "%s__bitmap, &%s"% (name, name)

    def mkvalueFormat(self):
        return "O"

    def mkvalueArgs(self, name):
        return "%s__object" % (name)

    def xxxxmkvalueCheck(self, name):
        # Disabled (xxxx-prefixed).  BUGFIX: the template has three %s
        # placeholders, but the original passed "name" only twice, which
        # would raise a formatting error if this method were re-enabled.
        Output("if ((%s__object = new_FSCatalogInfo(%s__bitmap, &%s)) == NULL) return NULL;", name, name, name)
class FSCatalogInfoAndBitmap_inType(FSCatalogInfoAndBitmapType, InputOnlyMixIn):
    """Input-only variant of the (bitmap, FSCatalogInfo) pair."""

    def xxxxmkvalueCheck(self, name):
        # Disabled (xxxx-prefixed): no result conversion for input-only use.
        pass
class FSCatalogInfoAndBitmap_outType(FSCatalogInfoAndBitmapType):
    """Output variant: only the bitmap is parsed from the Python args;
    the FSCatalogInfo struct itself is filled in by the Carbon call."""

    def getargsFormat(self):
        return "l"

    def getargsArgs(self, name):
        return "%s__bitmap" % name

    def getargsCheck(self, name):
        pass
FInfo = OpaqueType("FInfo", "FInfo")
FInfo_ptr = OpaqueType("FInfo", "FInfo")
AliasHandle = OpaqueByValueType("AliasHandle", "Alias")
FSSpec = OpaqueType("FSSpec", "FSSpec")
FSSpec_ptr = OpaqueType("FSSpec", "FSSpec")
OptFSSpecPtr = OptionalFSxxxType("FSSpec", "BUG", "myPyMac_GetOptFSSpecPtr")
FSRef = OpaqueType("FSRef", "FSRef")
FSRef_ptr = OpaqueType("FSRef", "FSRef")
OptFSRefPtr = OptionalFSxxxType("FSRef", "BUG", "myPyMac_GetOptFSRefPtr")
FSCatalogInfo = OpaqueType("FSCatalogInfo", "FSCatalogInfo")
FSCatalogInfo_ptr = OpaqueType("FSCatalogInfo", "FSCatalogInfo")
# To be done:
#CatPositionRec
#FSCatalogInfo
#FSForkInfo
#FSIterator
#FSVolumeInfo
#FSSpecArrayPtr
includestuff = includestuff + """
#include <Carbon/Carbon.h>
#ifdef USE_TOOLBOX_OBJECT_GLUE
extern int _PyMac_GetFSSpec(PyObject *v, FSSpec *spec);
extern int _PyMac_GetFSRef(PyObject *v, FSRef *fsr);
extern PyObject *_PyMac_BuildFSSpec(FSSpec *spec);
extern PyObject *_PyMac_BuildFSRef(FSRef *spec);
#define PyMac_GetFSSpec _PyMac_GetFSSpec
#define PyMac_GetFSRef _PyMac_GetFSRef
#define PyMac_BuildFSSpec _PyMac_BuildFSSpec
#define PyMac_BuildFSRef _PyMac_BuildFSRef
#else
extern int PyMac_GetFSSpec(PyObject *v, FSSpec *spec);
extern int PyMac_GetFSRef(PyObject *v, FSRef *fsr);
extern PyObject *PyMac_BuildFSSpec(FSSpec *spec);
extern PyObject *PyMac_BuildFSRef(FSRef *spec);
#endif
/* Forward declarations */
static PyObject *FInfo_New(FInfo *itself);
static PyObject *FSRef_New(FSRef *itself);
static PyObject *FSSpec_New(FSSpec *itself);
static PyObject *Alias_New(AliasHandle itself);
static int FInfo_Convert(PyObject *v, FInfo *p_itself);
#define FSRef_Convert PyMac_GetFSRef
#define FSSpec_Convert PyMac_GetFSSpec
static int Alias_Convert(PyObject *v, AliasHandle *p_itself);
/*
** UTCDateTime records
*/
static int
UTCDateTime_Convert(PyObject *v, UTCDateTime *ptr)
{
return PyArg_Parse(v, "(HlH)", &ptr->highSeconds, &ptr->lowSeconds, &ptr->fraction);
}
static PyObject *
UTCDateTime_New(UTCDateTime *ptr)
{
return Py_BuildValue("(HlH)", ptr->highSeconds, ptr->lowSeconds, ptr->fraction);
}
/*
** Optional fsspec and fsref pointers. None will pass NULL
*/
static int
myPyMac_GetOptFSSpecPtr(PyObject *v, FSSpec **spec)
{
if (v == Py_None) {
*spec = NULL;
return 1;
}
return PyMac_GetFSSpec(v, *spec);
}
static int
myPyMac_GetOptFSRefPtr(PyObject *v, FSRef **ref)
{
if (v == Py_None) {
*ref = NULL;
return 1;
}
return PyMac_GetFSRef(v, *ref);
}
/*
** Parse/generate objsect
*/
static PyObject *
PyMac_BuildHFSUniStr255(HFSUniStr255 *itself)
{
return Py_BuildValue("u#", itself->unicode, itself->length);
}
#ifndef __LP64__
/*
** Get pathname for a given FSSpec
*/
static OSErr
_PyMac_GetFullPathname(FSSpec *fss, char *path, int len)
{
FSRef fsr;
OSErr err;
*path = '\0';
err = FSpMakeFSRef(fss, &fsr);
if (err == fnfErr) {
/* FSSpecs can point to non-existing files, fsrefs can't. */
FSSpec fss2;
int tocopy;
err = FSMakeFSSpec(fss->vRefNum, fss->parID, "", &fss2);
if (err)
return err;
err = FSpMakeFSRef(&fss2, &fsr);
if (err)
return err;
err = (OSErr)FSRefMakePath(&fsr, path, len-1);
if (err)
return err;
/* This part is not 100% safe: we append the filename part, but
** I'm not sure that we don't run afoul of the various 8bit
** encodings here. Will have to look this up at some point...
*/
strcat(path, "/");
tocopy = fss->name[0];
if ((strlen(path) + tocopy) >= len)
tocopy = len - strlen(path) - 1;
if (tocopy > 0)
strncat(path, fss->name+1, tocopy);
}
else {
if (err)
return err;
err = (OSErr)FSRefMakePath(&fsr, path, len);
if (err)
return err;
}
return 0;
}
#endif /* !__LP64__ */
"""
finalstuff = finalstuff + """
#ifndef __LP64__
int
PyMac_GetFSSpec(PyObject *v, FSSpec *spec)
{
Str255 path;
short refnum;
long parid;
OSErr err;
FSRef fsr;
if (FSSpec_Check(v)) {
*spec = ((FSSpecObject *)v)->ob_itself;
return 1;
}
if (PyArg_Parse(v, "(hlO&)",
&refnum, &parid, PyMac_GetStr255, &path)) {
err = FSMakeFSSpec(refnum, parid, path, spec);
if ( err && err != fnfErr ) {
PyMac_Error(err);
return 0;
}
return 1;
}
PyErr_Clear();
/* Otherwise we try to go via an FSRef. On OSX we go all the way,
** on OS9 we accept only a real FSRef object
*/
if ( PyMac_GetFSRef(v, &fsr) ) {
err = FSGetCatalogInfo(&fsr, kFSCatInfoNone, NULL, NULL, spec, NULL);
if (err != noErr) {
PyMac_Error(err);
return 0;
}
return 1;
}
return 0;
}
#endif /* !__LP64__ */
int
PyMac_GetFSRef(PyObject *v, FSRef *fsr)
{
OSStatus err;
FSSpec fss;
if (FSRef_Check(v)) {
*fsr = ((FSRefObject *)v)->ob_itself;
return 1;
}
/* On OSX we now try a pathname */
if ( PyString_Check(v) || PyUnicode_Check(v)) {
char *path = NULL;
if (!PyArg_Parse(v, "et", Py_FileSystemDefaultEncoding, &path))
return 0;
if ( (err=FSPathMakeRef(path, fsr, NULL)) )
PyMac_Error(err);
PyMem_Free(path);
return !err;
}
/* XXXX Should try unicode here too */
/* Otherwise we try to go via an FSSpec */
#ifndef __LP64__
if (FSSpec_Check(v)) {
fss = ((FSSpecObject *)v)->ob_itself;
if ((err=FSpMakeFSRef(&fss, fsr)) == 0)
return 1;
PyMac_Error(err);
return 0;
}
PyErr_SetString(PyExc_TypeError, "FSRef, FSSpec or pathname required");
#else /* __LP64__ */
PyErr_SetString(PyExc_TypeError, "FSRef or pathname required");
#endif /* __LP64__ */
return 0;
}
#ifndef __LP64__
extern PyObject *
PyMac_BuildFSSpec(FSSpec *spec)
{
return FSSpec_New(spec);
}
#endif /* __LP64__ */
extern PyObject *
PyMac_BuildFSRef(FSRef *spec)
{
return FSRef_New(spec);
}
"""
initstuff = initstuff + """
#ifndef __LP64__
PyMac_INIT_TOOLBOX_OBJECT_NEW(FSSpec *, PyMac_BuildFSSpec);
PyMac_INIT_TOOLBOX_OBJECT_CONVERT(FSSpec, PyMac_GetFSSpec);
#endif /* !__LP64__*/
PyMac_INIT_TOOLBOX_OBJECT_NEW(FSRef *, PyMac_BuildFSRef);
PyMac_INIT_TOOLBOX_OBJECT_CONVERT(FSRef, PyMac_GetFSRef);
"""
execfile(string.lower(MODPREFIX) + 'typetest.py')
# Our object types:
class FSCatalogInfoDefinition(PEP253Mixin, ObjectDefinition):
    """Generator for the Python FSCatalogInfo object type.

    getsetlist maps each field of the embedded FSCatalogInfo C struct to
    (name, getter C code, setter C code, doc) entries emitted into the
    generated module.
    """
    getsetlist = [
        ("nodeFlags",
         "return Py_BuildValue(\"H\", self->ob_itself.nodeFlags);",
         "return PyArg_Parse(v, \"H\", &self->ob_itself.nodeFlags)-1;",
         None
         ),
        ("volume",
         "return Py_BuildValue(\"h\", self->ob_itself.volume);",
         "return PyArg_Parse(v, \"h\", &self->ob_itself.volume)-1;",
         None
         ),
        ("parentDirID",
         "return Py_BuildValue(\"l\", self->ob_itself.parentDirID);",
         "return PyArg_Parse(v, \"l\", &self->ob_itself.parentDirID)-1;",
         None
         ),
        ("nodeID",
         "return Py_BuildValue(\"l\", self->ob_itself.nodeID);",
         "return PyArg_Parse(v, \"l\", &self->ob_itself.nodeID)-1;",
         None
         ),
        ("createDate",
         "return Py_BuildValue(\"O&\", UTCDateTime_New, &self->ob_itself.createDate);",
         "return PyArg_Parse(v, \"O&\", UTCDateTime_Convert, &self->ob_itself.createDate)-1;",
         None
         ),
        ("contentModDate",
         "return Py_BuildValue(\"O&\", UTCDateTime_New, &self->ob_itself.contentModDate);",
         "return PyArg_Parse(v, \"O&\", UTCDateTime_Convert, &self->ob_itself.contentModDate)-1;",
         None
         ),
        ("attributeModDate",
         "return Py_BuildValue(\"O&\", UTCDateTime_New, &self->ob_itself.attributeModDate);",
         "return PyArg_Parse(v, \"O&\", UTCDateTime_Convert, &self->ob_itself.attributeModDate)-1;",
         None
         ),
        ("accessDate",
         "return Py_BuildValue(\"O&\", UTCDateTime_New, &self->ob_itself.accessDate);",
         "return PyArg_Parse(v, \"O&\", UTCDateTime_Convert, &self->ob_itself.accessDate)-1;",
         None
         ),
        ("backupDate",
         "return Py_BuildValue(\"O&\", UTCDateTime_New, &self->ob_itself.backupDate);",
         "return PyArg_Parse(v, \"O&\", UTCDateTime_Convert, &self->ob_itself.backupDate)-1;",
         None
         ),
        ("permissions",
         "return Py_BuildValue(\"(llll)\", self->ob_itself.permissions[0], self->ob_itself.permissions[1], self->ob_itself.permissions[2], self->ob_itself.permissions[3]);",
         "return PyArg_Parse(v, \"(llll)\", &self->ob_itself.permissions[0], &self->ob_itself.permissions[1], &self->ob_itself.permissions[2], &self->ob_itself.permissions[3])-1;",
         None
         ),
        # XXXX FinderInfo TBD
        # XXXX FinderXInfo TBD
        ("valence",
         "return Py_BuildValue(\"l\", self->ob_itself.valence);",
         "return PyArg_Parse(v, \"l\", &self->ob_itself.valence)-1;",
         None
         ),
        ("dataLogicalSize",
         "return Py_BuildValue(\"l\", self->ob_itself.dataLogicalSize);",
         "return PyArg_Parse(v, \"l\", &self->ob_itself.dataLogicalSize)-1;",
         None
         ),
        ("dataPhysicalSize",
         "return Py_BuildValue(\"l\", self->ob_itself.dataPhysicalSize);",
         "return PyArg_Parse(v, \"l\", &self->ob_itself.dataPhysicalSize)-1;",
         None
         ),
        ("rsrcLogicalSize",
         "return Py_BuildValue(\"l\", self->ob_itself.rsrcLogicalSize);",
         "return PyArg_Parse(v, \"l\", &self->ob_itself.rsrcLogicalSize)-1;",
         None
         ),
        ("rsrcPhysicalSize",
         "return Py_BuildValue(\"l\", self->ob_itself.rsrcPhysicalSize);",
         "return PyArg_Parse(v, \"l\", &self->ob_itself.rsrcPhysicalSize)-1;",
         None
         ),
        ("sharingFlags",
         "return Py_BuildValue(\"l\", self->ob_itself.sharingFlags);",
         "return PyArg_Parse(v, \"l\", &self->ob_itself.sharingFlags)-1;",
         None
         ),
        ("userPrivileges",
         "return Py_BuildValue(\"b\", self->ob_itself.userPrivileges);",
         "return PyArg_Parse(v, \"b\", &self->ob_itself.userPrivileges)-1;",
         None
         ),
    ]
    # The same info, but in a different form: format string, argument list
    # and keyword names for the generated tp_init's
    # PyArg_ParseTupleAndKeywords call.
    INITFORMAT = "HhllO&O&O&O&O&llllllb"
    INITARGS = """&((FSCatalogInfoObject *)_self)->ob_itself.nodeFlags,
        &((FSCatalogInfoObject *)_self)->ob_itself.volume,
        &((FSCatalogInfoObject *)_self)->ob_itself.parentDirID,
        &((FSCatalogInfoObject *)_self)->ob_itself.nodeID,
        UTCDateTime_Convert, &((FSCatalogInfoObject *)_self)->ob_itself.createDate,
        UTCDateTime_Convert, &((FSCatalogInfoObject *)_self)->ob_itself.contentModDate,
        UTCDateTime_Convert, &((FSCatalogInfoObject *)_self)->ob_itself.attributeModDate,
        UTCDateTime_Convert, &((FSCatalogInfoObject *)_self)->ob_itself.accessDate,
        UTCDateTime_Convert, &((FSCatalogInfoObject *)_self)->ob_itself.backupDate,
        &((FSCatalogInfoObject *)_self)->ob_itself.valence,
        &((FSCatalogInfoObject *)_self)->ob_itself.dataLogicalSize,
        &((FSCatalogInfoObject *)_self)->ob_itself.dataPhysicalSize,
        &((FSCatalogInfoObject *)_self)->ob_itself.rsrcLogicalSize,
        &((FSCatalogInfoObject *)_self)->ob_itself.rsrcPhysicalSize,
        &((FSCatalogInfoObject *)_self)->ob_itself.sharingFlags,
        &((FSCatalogInfoObject *)_self)->ob_itself.userPrivileges"""
    # NOTE(review): "atributeModDate" below is misspelled (should be
    # "attributeModDate").  These strings are emitted verbatim as the
    # keyword-argument names of the generated C module, so "fixing" the
    # typo here would change the generated module's keyword API.
    INITNAMES = """
        "nodeFlags",
        "volume",
        "parentDirID",
        "nodeID",
        "createDate",
        "contentModDate",
        "atributeModDate",
        "accessDate",
        "backupDate",
        "valence",
        "dataLogicalSize",
        "dataPhysicalSize",
        "rsrcLogicalSize",
        "rsrcPhysicalSize",
        "sharingFlags",
        "userPrivileges"
        """

    def __init__(self, name, prefix, itselftype):
        ObjectDefinition.__init__(self, name, prefix, itselftype)
        self.argref = "*"  # Store structs by value, but pass them by address

    def outputCheckNewArg(self):
        # Unlike the other object types here, a NULL pointer maps to None.
        Output("if (itself == NULL) { Py_INCREF(Py_None); return Py_None; }")

    def output_tp_newBody(self):
        # tp_new: allocate and zero-fill the embedded FSCatalogInfo struct.
        Output("PyObject *self;");
        Output()
        Output("if ((self = type->tp_alloc(type, 0)) == NULL) return NULL;")
        Output("memset(&((%s *)self)->ob_itself, 0, sizeof(%s));",
               self.objecttype, self.itselftype)
        Output("return self;")

    def output_tp_initBody(self):
        # tp_init: all fields optional, settable by keyword (see INIT* above).
        Output("static char *kw[] = {%s, 0};", self.INITNAMES)
        Output()
        Output("if (!PyArg_ParseTupleAndKeywords(_args, _kwds, \"|%s\", kw, %s))",
               self.INITFORMAT, self.INITARGS)
        OutLbrace()
        Output("return -1;")
        OutRbrace()
        Output("return 0;")
class FInfoDefinition(PEP253Mixin, ObjectDefinition):
    """Generator for the FInfo (classic Finder info) Python object type."""

    getsetlist = [
        ("Type",
         "return Py_BuildValue(\"O&\", PyMac_BuildOSType, self->ob_itself.fdType);",
         "return PyArg_Parse(v, \"O&\", PyMac_GetOSType, &self->ob_itself.fdType)-1;",
         "4-char file type"
         ),
        ("Creator",
         "return Py_BuildValue(\"O&\", PyMac_BuildOSType, self->ob_itself.fdCreator);",
         "return PyArg_Parse(v, \"O&\", PyMac_GetOSType, &self->ob_itself.fdCreator)-1;",
         "4-char file creator"
         ),
        ("Flags",
         "return Py_BuildValue(\"H\", self->ob_itself.fdFlags);",
         "return PyArg_Parse(v, \"H\", &self->ob_itself.fdFlags)-1;",
         "Finder flag bits"
         ),
        ("Location",
         "return Py_BuildValue(\"O&\", PyMac_BuildPoint, self->ob_itself.fdLocation);",
         "return PyArg_Parse(v, \"O&\", PyMac_GetPoint, &self->ob_itself.fdLocation)-1;",
         "(x, y) location of the file's icon in its parent finder window"
         ),
        ("Fldr",
         "return Py_BuildValue(\"h\", self->ob_itself.fdFldr);",
         "return PyArg_Parse(v, \"h\", &self->ob_itself.fdFldr)-1;",
         "Original folder, for 'put away'"
         ),
    ]

    def __init__(self, name, prefix, itselftype):
        ObjectDefinition.__init__(self, name, prefix, itselftype)
        self.argref = "*"  # Store FInfo structs by value, pass by address

    def outputCheckNewArg(self):
        # Refuse to wrap a NULL pointer.
        Output("if (itself == NULL) return PyMac_Error(resNotFound);")

    def output_tp_newBody(self):
        # tp_new: allocate and zero-fill the embedded FInfo struct.
        Output("PyObject *self;");
        Output()
        Output("if ((self = type->tp_alloc(type, 0)) == NULL) return NULL;")
        Output("memset(&((%s *)self)->ob_itself, 0, sizeof(%s));",
               self.objecttype, self.itselftype)
        Output("return self;")

    def output_tp_initBody(self):
        # tp_init: optionally copy from an existing FInfo given as "itself".
        Output("%s *itself = NULL;", self.itselftype)
        Output("static char *kw[] = {\"itself\", 0};")
        Output()
        Output("if (PyArg_ParseTupleAndKeywords(_args, _kwds, \"|O&\", kw, FInfo_Convert, &itself))")
        OutLbrace()
        Output("if (itself) memcpy(&((%s *)_self)->ob_itself, itself, sizeof(%s));",
               self.objecttype, self.itselftype)
        Output("return 0;")
        OutRbrace()
        Output("return -1;")
class FSSpecDefinition(PEP253Mixin, ObjectDefinition):
    """Generator for the FSSpec Python object type.

    Nearly identical to FSRefDefinition below, except that it also emits
    a custom repr and converts via PyMac_GetFSSpec.
    """

    getsetlist = [
        ("data",
         "return PyString_FromStringAndSize((char *)&self->ob_itself, sizeof(self->ob_itself));",
         None,
         "Raw data of the FSSpec object"
         )
    ]

    def __init__(self, name, prefix, itselftype):
        ObjectDefinition.__init__(self, name, prefix, itselftype)
        self.argref = "*"  # Store FSSpecs, but pass them by address

    def outputCheckNewArg(self):
        Output("if (itself == NULL) return PyMac_Error(resNotFound);")

    # We do Convert ourselves (with PyMac_GetFSxxx)
    def outputConvert(self):
        pass

    def output_tp_newBody(self):
        # tp_new: allocate and zero-fill the embedded FSSpec struct.
        Output("PyObject *self;");
        Output()
        Output("if ((self = type->tp_alloc(type, 0)) == NULL) return NULL;")
        Output("memset(&((%s *)self)->ob_itself, 0, sizeof(%s));",
               self.objecttype, self.itselftype)
        Output("return self;")

    def output_tp_initBody(self):
        # tp_init accepts either "itself" (anything PyMac_GetFSSpec accepts)
        # or "rawdata" (a string of exactly sizeof(FSSpec) bytes) -- exactly
        # one of the two must be given.
        Output("PyObject *v = NULL;")
        Output("char *rawdata = NULL;")
        Output("int rawdatalen = 0;")
        Output("static char *kw[] = {\"itself\", \"rawdata\", 0};")
        Output()
        Output("if (!PyArg_ParseTupleAndKeywords(_args, _kwds, \"|Os#\", kw, &v, &rawdata, &rawdatalen))")
        Output("return -1;")
        Output("if (v && rawdata)")
        OutLbrace()
        Output("PyErr_SetString(PyExc_TypeError, \"Only one of itself or rawdata may be specified\");")
        Output("return -1;")
        OutRbrace()
        Output("if (!v && !rawdata)")
        OutLbrace()
        Output("PyErr_SetString(PyExc_TypeError, \"One of itself or rawdata must be specified\");")
        Output("return -1;")
        OutRbrace()
        Output("if (rawdata)")
        OutLbrace()
        Output("if (rawdatalen != sizeof(%s))", self.itselftype)
        OutLbrace()
        Output("PyErr_SetString(PyExc_TypeError, \"%s rawdata incorrect size\");",
               self.itselftype)
        Output("return -1;")
        OutRbrace()
        Output("memcpy(&((%s *)_self)->ob_itself, rawdata, rawdatalen);", self.objecttype)
        Output("return 0;")
        OutRbrace()
        Output("if (PyMac_GetFSSpec(v, &((%s *)_self)->ob_itself)) return 0;", self.objecttype)
        Output("return -1;")

    def outputRepr(self):
        # repr() shows the spec as TypeName((vRefNum, parID, 'name')).
        Output()
        Output("static PyObject * %s_repr(%s *self)", self.prefix, self.objecttype)
        OutLbrace()
        Output("char buf[512];")
        Output("""PyOS_snprintf(buf, sizeof(buf), \"%%s((%%d, %%ld, '%%.*s'))\",
        self->ob_type->tp_name,
        self->ob_itself.vRefNum,
        self->ob_itself.parID,
        self->ob_itself.name[0], self->ob_itself.name+1);""")
        Output("return PyString_FromString(buf);")
        OutRbrace()
class FSRefDefinition(PEP253Mixin, ObjectDefinition):
    """Generator for the FSRef Python object type.

    Mirrors FSSpecDefinition above (minus the custom repr), converting
    via PyMac_GetFSRef instead.
    """

    getsetlist = [
        ("data",
         "return PyString_FromStringAndSize((char *)&self->ob_itself, sizeof(self->ob_itself));",
         None,
         "Raw data of the FSRef object"
         )
    ]

    def __init__(self, name, prefix, itselftype):
        ObjectDefinition.__init__(self, name, prefix, itselftype)
        self.argref = "*"  # Store FSRefs, but pass them by address

    def outputCheckNewArg(self):
        Output("if (itself == NULL) return PyMac_Error(resNotFound);")

    # We do Convert ourselves (with PyMac_GetFSxxx)
    def outputConvert(self):
        pass

    def output_tp_newBody(self):
        # tp_new: allocate and zero-fill the embedded FSRef struct.
        Output("PyObject *self;");
        Output()
        Output("if ((self = type->tp_alloc(type, 0)) == NULL) return NULL;")
        Output("memset(&((%s *)self)->ob_itself, 0, sizeof(%s));",
               self.objecttype, self.itselftype)
        Output("return self;")

    def output_tp_initBody(self):
        # tp_init accepts either "itself" (anything PyMac_GetFSRef accepts)
        # or "rawdata" (a string of exactly sizeof(FSRef) bytes) -- exactly
        # one of the two must be given.
        Output("PyObject *v = NULL;")
        Output("char *rawdata = NULL;")
        Output("int rawdatalen = 0;")
        Output("static char *kw[] = {\"itself\", \"rawdata\", 0};")
        Output()
        Output("if (!PyArg_ParseTupleAndKeywords(_args, _kwds, \"|Os#\", kw, &v, &rawdata, &rawdatalen))")
        Output("return -1;")
        Output("if (v && rawdata)")
        OutLbrace()
        Output("PyErr_SetString(PyExc_TypeError, \"Only one of itself or rawdata may be specified\");")
        Output("return -1;")
        OutRbrace()
        Output("if (!v && !rawdata)")
        OutLbrace()
        Output("PyErr_SetString(PyExc_TypeError, \"One of itself or rawdata must be specified\");")
        Output("return -1;")
        OutRbrace()
        Output("if (rawdata)")
        OutLbrace()
        Output("if (rawdatalen != sizeof(%s))", self.itselftype)
        OutLbrace()
        Output("PyErr_SetString(PyExc_TypeError, \"%s rawdata incorrect size\");",
               self.itselftype)
        Output("return -1;")
        OutRbrace()
        Output("memcpy(&((%s *)_self)->ob_itself, rawdata, rawdatalen);", self.objecttype)
        Output("return 0;")
        OutRbrace()
        Output("if (PyMac_GetFSRef(v, &((%s *)_self)->ob_itself)) return 0;", self.objecttype)
        Output("return -1;")
class AliasDefinition(PEP253Mixin, ObjectDefinition):
    """Generator for the Alias Python object type (wraps an AliasHandle)."""
    # XXXX Should inherit from resource?

    getsetlist = [
        ("data",
         """int size;
        PyObject *rv;

        size = GetHandleSize((Handle)self->ob_itself);
        HLock((Handle)self->ob_itself);
        rv = PyString_FromStringAndSize(*(Handle)self->ob_itself, size);
        HUnlock((Handle)self->ob_itself);
        return rv;
        """,
         None,
         "Raw data of the alias object"
         )
    ]

    def outputCheckNewArg(self):
        Output("if (itself == NULL) return PyMac_Error(resNotFound);")

    def outputStructMembers(self):
        # In addition to the AliasHandle itself, keep an optional destructor
        # so handles we own are disposed of when the object is deallocated.
        ObjectDefinition.outputStructMembers(self)
        Output("void (*ob_freeit)(%s ptr);", self.itselftype)

    def outputInitStructMembers(self):
        ObjectDefinition.outputInitStructMembers(self)
        Output("it->ob_freeit = NULL;")

    def outputCleanupStructMembers(self):
        # Call the destructor (if any) and clear the handle on dealloc.
        Output("if (self->ob_freeit && self->ob_itself)")
        OutLbrace()
        Output("self->ob_freeit(self->ob_itself);")
        OutRbrace()
        Output("self->ob_itself = NULL;")

    def output_tp_newBody(self):
        # tp_new: allocate with a NULL handle; tp_init fills it in.
        Output("PyObject *self;");
        Output()
        Output("if ((self = type->tp_alloc(type, 0)) == NULL) return NULL;")
        Output("((%s *)self)->ob_itself = NULL;", self.objecttype)
        Output("return self;")

    def output_tp_initBody(self):
        # tp_init accepts either "itself" (an existing AliasHandle) or
        # "rawdata" (bytes copied into a freshly allocated Handle) --
        # exactly one of the two must be given.
        Output("%s itself = NULL;", self.itselftype)
        Output("char *rawdata = NULL;")
        Output("int rawdatalen = 0;")
        Output("Handle h;")
        Output("static char *kw[] = {\"itself\", \"rawdata\", 0};")
        Output()
        Output("if (!PyArg_ParseTupleAndKeywords(_args, _kwds, \"|O&s#\", kw, %s_Convert, &itself, &rawdata, &rawdatalen))",
               self.prefix)
        Output("return -1;")
        Output("if (itself && rawdata)")
        OutLbrace()
        Output("PyErr_SetString(PyExc_TypeError, \"Only one of itself or rawdata may be specified\");")
        Output("return -1;")
        OutRbrace()
        Output("if (!itself && !rawdata)")
        OutLbrace()
        Output("PyErr_SetString(PyExc_TypeError, \"One of itself or rawdata must be specified\");")
        Output("return -1;")
        OutRbrace()
        Output("if (rawdata)")
        OutLbrace()
        Output("if ((h = NewHandle(rawdatalen)) == NULL)")
        OutLbrace()
        Output("PyErr_NoMemory();")
        Output("return -1;")
        OutRbrace()
        Output("HLock(h);")
        Output("memcpy((char *)*h, rawdata, rawdatalen);")
        Output("HUnlock(h);")
        Output("((%s *)_self)->ob_itself = (%s)h;", self.objecttype, self.itselftype)
        Output("return 0;")
        OutRbrace()
        Output("((%s *)_self)->ob_itself = itself;", self.objecttype)
        Output("return 0;")
# Alias methods come in two flavors: those with the alias as arg1 and
# those with the alias as arg 2.
class Arg2MethodGenerator(OSErrMethodGenerator):
    """Similar to MethodGenerator, but has self as second argument"""

    def parseArgumentList(self, args):
        # Pull out argument #2 (index 1): it is the wrapped object itself.
        args0, arg1, argsrest = args[:1], args[1], args[2:]
        t0, n0, m0 = arg1
        args = args0 + argsrest
        if m0 != InMode:
            raise ValueError, "method's 'self' must be 'InMode'"
        # Re-insert ob_itself as a SelfMode variable in position 2.
        self.itself = Variable(t0, "_self->ob_itself", SelfMode)
        FunctionGenerator.parseArgumentList(self, args)
        self.argumentList.insert(2, self.itself)
# From here on it's basically all boiler plate...
# Create the generator groups and link them
module = MacModule(MODNAME, MODPREFIX, includestuff, finalstuff, initstuff,
                   longname=LONGMODNAME)

# One object-definition generator per wrapped Carbon type.
fscataloginfoobject = FSCatalogInfoDefinition('FSCatalogInfo', 'FSCatalogInfo', 'FSCatalogInfo')
finfoobject = FInfoDefinition('FInfo', 'FInfo', 'FInfo')
aliasobject = AliasDefinition('Alias', 'Alias', 'AliasHandle')
fsspecobject = FSSpecDefinition('FSSpec', 'FSSpec', 'FSSpec')
fsrefobject = FSRefDefinition('FSRef', 'FSRef', 'FSRef')

module.addobject(fscataloginfoobject)
module.addobject(finfoobject)
module.addobject(aliasobject)
module.addobject(fsspecobject)
module.addobject(fsrefobject)

# Create the generator classes used to populate the lists
Function = OSErrFunctionGenerator
Method = OSErrMethodGenerator

# Create and populate the lists
functions = []
alias_methods = []
fsref_methods = []
fsspec_methods = []

# The scanned input file appends generators to the lists above (Python 2 execfile).
execfile(INPUTFILE)

# Manual generators:
FSRefMakePath_body = """
OSStatus _err;
#define MAXPATHNAME 1024
UInt8 path[MAXPATHNAME];
UInt32 maxPathSize = MAXPATHNAME;
if (!PyArg_ParseTuple(_args, ""))
return NULL;
_err = FSRefMakePath(&_self->ob_itself,
path,
maxPathSize);
if (_err != noErr) return PyMac_Error(_err);
_res = Py_BuildValue("s", path);
return _res;
"""

f = ManualGenerator("FSRefMakePath", FSRefMakePath_body)
f.docstring = lambda: "() -> string"
fsref_methods.append(f)

# as_pathname on FSRef simply delegates to FSRefMakePath.
FSRef_as_pathname_body = """
if (!PyArg_ParseTuple(_args, ""))
return NULL;
_res = FSRef_FSRefMakePath(_self, _args);
return _res;
"""

f = ManualGenerator("as_pathname", FSRef_as_pathname_body)
f.docstring = lambda: "() -> string"
fsref_methods.append(f)

FSSpec_as_pathname_body = """
char strbuf[1024];
OSErr err;
if (!PyArg_ParseTuple(_args, ""))
return NULL;
err = _PyMac_GetFullPathname(&_self->ob_itself, strbuf, sizeof(strbuf));
if ( err ) {
PyMac_Error(err);
return NULL;
}
_res = PyString_FromString(strbuf);
return _res;
"""

f = ManualGenerator("as_pathname", FSSpec_as_pathname_body)
f.docstring = lambda: "() -> string"
fsspec_methods.append(f)

FSSpec_as_tuple_body = """
if (!PyArg_ParseTuple(_args, ""))
return NULL;
_res = Py_BuildValue("(iis#)", _self->ob_itself.vRefNum, _self->ob_itself.parID,
&_self->ob_itself.name[1], _self->ob_itself.name[0]);
return _res;
"""

f = ManualGenerator("as_tuple", FSSpec_as_tuple_body)
f.docstring = lambda: "() -> (vRefNum, dirID, name)"
fsspec_methods.append(f)

# Module-level pathname(): accept str/unicode directly, otherwise call the
# object's own as_pathname method.
pathname_body = """
PyObject *obj;
if (!PyArg_ParseTuple(_args, "O", &obj))
return NULL;
if (PyString_Check(obj)) {
Py_INCREF(obj);
return obj;
}
if (PyUnicode_Check(obj))
return PyUnicode_AsEncodedString(obj, "utf8", "strict");
_res = PyObject_CallMethod(obj, "as_pathname", NULL);
return _res;
"""

f = ManualGenerator("pathname", pathname_body)
f.docstring = lambda: "(str|unicode|FSSpec|FSref) -> pathname"
functions.append(f)

# add the populated lists to the generator groups
# (in a different world the scan program would generate this)
for f in functions: module.add(f)
for f in alias_methods: aliasobject.add(f)
for f in fsspec_methods: fsspecobject.add(f)
for f in fsref_methods: fsrefobject.add(f)

# generate output (open the output file as late as possible)
SetOutputFileName(OUTPUTFILE)
module.generate()
|
GaneshmKumar/Alertify | refs/heads/master | alertify/exception.py | 1 | class InvalidArgument():
    def __init__(self, arg):
        # Store the constructor argument on .args.
        # NOTE(review): the enclosing class does not subclass Exception, so it
        # cannot be raised -- confirm whether that is intended.
        self.args = arg
class PassArgument():
    """Signal object carrying a single argument on .args.

    NOTE(review): despite living in exception.py, this does not subclass
    Exception and therefore cannot be raised -- confirm intent.
    """
    def __init__(self, arg):
        self.args = arg
|
drkitty/cyder | refs/heads/master | cyder/cydns/ptr/tests/__init__.py | 12133432 | |
ccn-2m/django | refs/heads/master | tests/modeltests/get_or_create/__init__.py | 12133432 | |
flp9001/eveggie | refs/heads/master | eveggie/orders/__init__.py | 12133432 | |
pwong-mapr/private-hue | refs/heads/HUE-1096-abe | desktop/core/ext-py/Django-1.4.5/django/core/cache/backends/__init__.py | 12133432 | |
siskin/bluetooth-next | refs/heads/feature-rpl | scripts/gdb/linux/__init__.py | 2010 | # nothing to do for the initialization of this package
|
twneale/vertica-python | refs/heads/master | vertica_python/vertica/messages/backend_messages/empty_query_response.py | 3 |
from vertica_python.vertica.messages.message import BackendMessage
class EmptyQueryResponse(BackendMessage):
    """Backend message type registered under wire id 'I' (empty query response)."""
    pass

# Register this class for the b'I' protocol message id.
EmptyQueryResponse._message_id(b'I')
|
rdhyee/osf.io | refs/heads/develop | admin_tests/base/test_forms.py | 41 | from nose.tools import * # flake8: noqa
from tests.base import AdminTestCase
from admin.base.forms import GuidForm
class TestGuidForm(AdminTestCase):
    """Validation tests for admin.base.forms.GuidForm."""

    def setUp(self):
        super(TestGuidForm, self).setUp()

    def test_valid_data(self):
        # A non-empty guid validates and round-trips through cleaned_data.
        guid = '12345'
        form = GuidForm({
            'guid': guid,
        })
        assert_true(form.is_valid())
        assert_equal(form.cleaned_data.get('guid'), guid)

    def test_blank_data(self):
        # guid is required: an empty form fails with the standard message.
        form = GuidForm({})
        assert_false(form.is_valid())
        assert_equal(form.errors, {
            'guid': [u'This field is required.'],
        })
|
temmeand/scikit-rf | refs/heads/master | examples/oneport_calibration_example/cal.py | 14 | import skrf as rf
from pylab import *
# One-port calibration built from four measured/ideal standard pairs
# (short, two offset delay shorts, load).
cal = rf.Calibration(\
        measured = [\
                rf.Network('measured/short.s1p'),
                rf.Network('measured/delay short 132um.s1p'),
                rf.Network('measured/delay short 85um.s1p'),
                rf.Network('measured/load.s1p'),
                ],
        ideals =[\
                rf.Network('ideals/short.s1p'),
                rf.Network('ideals/delay short 132um.s1p'),
                rf.Network('ideals/delay short 85um.s1p'),
                rf.Network('ideals/load.s1p'),
                ],
        )

# Apply the calibration to the DUT measurement and compare to simulation.
ro_meas = rf.Network('dut/radiating open.s1p')
ro_cal = cal.apply_cal(ro_meas)
ro_sim = rf.Network('simulation/radiating open.s1p')

figure()
ro_cal.plot_s_db(label='Experiment')
ro_sim.plot_s_db(label='Simulated')
draw();show();
|
captivo/linux-captivo | refs/heads/captivo-picture | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
    """Top-level scrollable window that draws the scheduler-trace rectangles.

    Horizontal axis is time (microseconds mapped to pixels via zoom);
    each task/CPU row is one rectangle band.
    """
    # Layout constants, in pixels.
    Y_OFFSET = 100
    RECT_HEIGHT = 100
    RECT_SPACE = 50
    EVENT_MARKING_WIDTH = 5

    def __init__(self, sched_tracer, title, parent = None, id = -1):
        wx.Frame.__init__(self, parent, id, title)
        (self.screen_width, self.screen_height) = wx.GetDisplaySize()
        self.screen_width -= 10
        self.screen_height -= 10
        self.zoom = 0.5
        self.scroll_scale = 20
        self.sched_tracer = sched_tracer
        self.sched_tracer.set_root_win(self)
        (self.ts_start, self.ts_end) = sched_tracer.interval()
        self.update_width_virtual()
        self.nr_rects = sched_tracer.nr_rectangles() + 1
        self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))

        # whole window panel
        self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))

        # scrollable container
        # NOTE: this file targets Python 2, so the / divisions below are
        # integer divisions for these int operands.
        self.scroll = wx.ScrolledWindow(self.panel)
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
        self.scroll.EnableScrolling(True, True)
        self.scroll.SetFocus()

        # scrollable drawing area
        self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
        self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
        self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)

        self.scroll.Fit()
        self.Fit()

        self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)

        # Summary StaticText, created lazily by update_summary().
        self.txt = None

        self.Show(True)

    def us_to_px(self, val):
        # Microseconds -> pixels at the current zoom level.
        return val / (10 ** 3) * self.zoom

    def px_to_us(self, val):
        # Pixels -> microseconds at the current zoom level.
        return (val / self.zoom) * (10 ** 3)

    def scroll_start(self):
        # Current scroll origin in pixels.
        (x, y) = self.scroll.GetViewStart()
        return (x * self.scroll_scale, y * self.scroll_scale)

    def scroll_start_us(self):
        # Current horizontal scroll origin in microseconds.
        (x, y) = self.scroll_start()
        return self.px_to_us(x)

    def paint_rectangle_zone(self, nr, color, top_color, start, end):
        """Fill the [start, end] time span of row nr; top_color (if any) marks
        an event strip along the rectangle's top edge."""
        offset_px = self.us_to_px(start - self.ts_start)
        width_px = self.us_to_px(end - self.ts_start)

        offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
        width_py = RootFrame.RECT_HEIGHT

        dc = self.dc

        if top_color is not None:
            (r, g, b) = top_color
            top_color = wx.Colour(r, g, b)
            brush = wx.Brush(top_color, wx.SOLID)
            dc.SetBrush(brush)
            dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
            width_py -= RootFrame.EVENT_MARKING_WIDTH
            offset_py += RootFrame.EVENT_MARKING_WIDTH

        (r ,g, b) = color
        color = wx.Colour(r, g, b)
        brush = wx.Brush(color, wx.SOLID)
        dc.SetBrush(brush)
        dc.DrawRectangle(offset_px, offset_py, width_px, width_py)

    def update_rectangles(self, dc, start, end):
        # Ask the tracer to repaint everything in the visible time window.
        start += self.ts_start
        end += self.ts_start
        self.sched_tracer.fill_zone(start, end)

    def on_paint(self, event):
        dc = wx.PaintDC(self.scroll_panel)
        self.dc = dc

        width = min(self.width_virtual, self.screen_width)
        (x, y) = self.scroll_start()
        start = self.px_to_us(x)
        end = self.px_to_us(x + width)
        self.update_rectangles(dc, start, end)

    def rect_from_ypixel(self, y):
        # Map a y pixel to a row index, or -1 when it falls in the spacing gap.
        y -= RootFrame.Y_OFFSET
        rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
        height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)

        if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
            return -1

        return rect

    def update_summary(self, txt):
        # Replace the previous summary text widget, if any.
        if self.txt:
            self.txt.Destroy()
        self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))

    def on_mouse_down(self, event):
        (x, y) = event.GetPositionTuple()
        rect = self.rect_from_ypixel(y)
        if rect == -1:
            return

        t = self.px_to_us(x) + self.ts_start

        self.sched_tracer.mouse_down(rect, t)

    def update_width_virtual(self):
        self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)

    def __zoom(self, x):
        # Recompute the virtual width and keep time x at the scroll origin.
        self.update_width_virtual()
        (xpos, ypos) = self.scroll.GetViewStart()
        xpos = self.us_to_px(x) / self.scroll_scale
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
        self.Refresh()

    def zoom_in(self):
        x = self.scroll_start_us()
        self.zoom *= 2
        self.__zoom(x)

    def zoom_out(self):
        x = self.scroll_start_us()
        self.zoom /= 2
        self.__zoom(x)

    def on_key_press(self, event):
        # '+'/'-' zoom; arrow keys scroll one scroll unit.
        key = event.GetRawKeyCode()
        if key == ord("+"):
            self.zoom_in()
            return
        if key == ord("-"):
            self.zoom_out()
            return

        key = event.GetKeyCode()
        (x, y) = self.scroll.GetViewStart()
        if key == wx.WXK_RIGHT:
            self.scroll.Scroll(x + 1, y)
        elif key == wx.WXK_LEFT:
            self.scroll.Scroll(x - 1, y)
        elif key == wx.WXK_DOWN:
            self.scroll.Scroll(x, y + 1)
        elif key == wx.WXK_UP:
            self.scroll.Scroll(x, y - 1)
|
johnmgregoire/JCAPdatavis | refs/heads/master | echem_plate_math.py | 2 | import numpy, scipy, scipy.optimize, scipy.interpolate
import os, os.path, time, copy, pylab, operator
from scipy import interp
def myeval(c):
    """Parse a token string into None, NaN, 0 or an evaluated number.

    Leading zeros are stripped before evaluation so strings like '007'
    parse as 7; a token that is all zeros (optionally with a '.') maps to 0.
    NOTE(review): uses eval() on the input string -- unsafe for untrusted data.
    """
    if c == 'None':
        return None
    if c == 'nan' or c == 'NaN':
        return numpy.nan
    stripped = c.lstrip('0')
    if (stripped == '' or stripped == '.') and '0' in c:
        return 0
    return eval(stripped)
def removeoutliers_meanstd(arr, nptsoneside, nsig, gapptsoneside=0): #avrages maximum of 2*nptoneside points and usees distance from mean scaled by std compared to nsig to determine if the value should be replaced by the mean. if gapptsoneside>0, will do this leaving a gap around the point in question and using nptsoneside-gaps points for the mean and std
    # Replace any point lying more than nsig local standard deviations from
    # the mean of its neighbors (up to nptsoneside on each side, excluding the
    # point itself) with that local mean.
    # NOTE(review): an identical definition appears again later in this module
    # and shadows this one at import time -- confirm which copy is canonical.
    if nptsoneside==1 and gapptsoneside==0:
        return removesinglepixoutliers(arr, critratiotoneighbors=nsig)
    nsig=max(nsig, 1.)
    nptsoneside=max(nptsoneside, 2.)
    gapptsoneside=min(gapptsoneside, nptsoneside-2.)
    # Iterate over gap sizes 0..gapptsoneside; each pass rebuilds arr.
    for gap in range(gapptsoneside+1):
        # Window bounds for every index, clipped at the array edges.
        starti=numpy.uint32([max(i-(nptsoneside-gap), 0) for i in range(len(arr))])
        stopi=numpy.uint32([min(i+(nptsoneside-gap)+1, len(arr)) for i in range(len(arr))])
        #print [numpy.append(arr[i0:i], arr[i+1:i1]) for i, i0, i1 in zip(range(len(arr)), starti, stopi)][8]
        #print [(((numpy.append(arr[i0:i], arr[i+1:i1]).mean()-arr[i]))**2, (numpy.append(arr[i0:i], arr[i+1:i1]).std()*nsig)**2) for i, i0, i1 in zip(range(len(arr)), starti, stopi)][8]
        # and/or idiom selects either the original value or the window mean.
        arr=numpy.array([(((numpy.append(arr[i0:i], arr[i+1:i1]).mean()-arr[i]))**2<(numpy.append(arr[i0:i], arr[i+1:i1]).std()*nsig)**2 and (arr[i],) or (numpy.append(arr[i0:i], arr[i+1:i1]).mean(),))[0] for i, i0, i1 in zip(range(len(arr)), starti, stopi)], dtype=arr.dtype)
    return arr
def CalcArrSS(x, WeightExp=1., TestPts=10):
    """Return the mean of the leading "steady-state" portion of x.

    The prefix is grown in TestPts-sized steps while the weighted spread
    std(x[:n]) / n**WeightExp keeps decreasing; the mean of the final
    prefix is returned.
    """
    n = TestPts
    best_spread = x[:n].std() / n**WeightExp + 1
    while x[:n].std() / n**WeightExp < best_spread and n < len(x):
        best_spread = x[:n].std() / n**WeightExp
        n += TestPts
    return x[:n].mean()
def xx(x, WeightExp=1., TestPts=10):
    """Return the length of the leading "steady-state" prefix of x.

    Same growth rule as CalcArrSS (prefix grows in TestPts steps while
    std(x[:n]) / n**WeightExp decreases), but returns the prefix length
    instead of its mean.
    """
    n = TestPts
    prev_spread = x[:n].std() / n**WeightExp + 1
    while x[:n].std() / n**WeightExp < prev_spread and n < len(x):
        prev_spread = x[:n].std() / n**WeightExp
        n += TestPts
    return n
def concat_extrap_ends(x, npts, polyorder=1, lowside=True, highside=True):
    """Pad a 1-D array with npts polynomial-extrapolated points on each end.

    A degree-`polyorder` polynomial is fit to the npts edge points and
    evaluated just beyond the edge, so for data that follows the polynomial
    exactly the padded array continues the trend seamlessly.

    BUG FIX: the high-side extrapolation previously evaluated the fitted
    polynomial at coordinates 0..npts-1, which re-emits the last two data
    points instead of continuing past the end (the low side was seamless,
    exposing the asymmetry).  It now evaluates at coordinates 2..npts+1.
    Also uses numpy.polyfit/polyval: the scipy.polyfit/polyval aliases were
    removed from SciPy.
    """
    i = numpy.arange(npts, dtype='float64')
    if lowside:
        # Fit coords -1,-2,...,-npts to x[0..npts-1]; coords 0..npts-1 then
        # lie before the array start, evaluated in reversed order to prepend.
        coef = numpy.polyfit(-1*(i+1.), x[:npts], polyorder)
        x = numpy.concatenate([numpy.polyval(list(coef), i[::-1]), x])
    if highside:
        # Fit coords -1,0,1,...,npts-2 to the last npts points, so the last
        # data point sits at coordinate npts-2... with npts=3: coords -1,0,1.
        coef = numpy.polyfit(-1*(i[::-1]-1.), x[-1*npts:], polyorder)
        # Coordinates beyond the last data point start at 2 (see fix note).
        x = numpy.concatenate([x, numpy.polyval(list(coef), i+2.)])
    return x
def lininterpbetweenregularpoints(existy, interval):
existy=numpy.array(existy)
x=numpy.arange(interval,dtype='float32')/interval
diff=existy[1:]-existy[:-1]
o=numpy.outer(diff,x)
return numpy.concatenate([arr+start for arr,start in zip(o,existy[:-1])]+[existy[-1:]])
def interpwithinarr(existind, existy, order=3, interpplotax=None, interpcols=['k', 'r']):
    """Fill missing integer indices of a sparse 1-D signal by spline interpolation.

    existind : indices (0..max) at which existy values are known.
    existy   : known values, same length as existind.
    order    : spline degree; order=1 with uniformly spaced indices takes the
               fast linear path via lininterpbetweenregularpoints.
    interpplotax : optional matplotlib axes; known points are drawn in
               interpcols[0], interpolated points in interpcols[1].
    Returns an array of length max(existind)+1 with gaps filled in.

    FIX: scipy.interpolate.spline() was removed from SciPy; its documented
    replacement make_interp_spline (k = spline degree) is used instead.
    """
    if order == 1:
        existind = numpy.array(existind)
        diff = existind[1:] - existind[:-1]
        if numpy.all(diff == diff[0]):
            # Uniform spacing: plain linear ramps, no spline machinery needed.
            return lininterpbetweenregularpoints(existy, diff[0])
    # Indices not present in existind are the ones to interpolate.
    interind = sorted(list(set(numpy.arange(max(existind) + 1)) - set(existind)))
    yfull = numpy.zeros(max(existind) + 1, existy.dtype)
    yfull[existind] = existy[:]
    yfull[interind] = scipy.interpolate.make_interp_spline(existind, existy, k=order)(interind)
    if not interpplotax is None:
        interpplotax.plot(existind, existy, interpcols[0])
        interpplotax.plot(interind, yfull[interind], interpcols[1])
    return yfull
def savgolsmooth(x, nptsoneside=7, order = 4, dx=1.0, deriv=0, binprior=0): #based on scipy cookbook. x is 1-d array, window is the number of points used to smooth the data, order is the order of the smoothing polynomial, will return the smoothed "deriv"th derivative of x
    # Savitzky-Golay smoothing / differentiation.
    # NOTE(review): this is Python 2 code (xrange; offset_data is a list from
    # zip()); numpy.mat is also deprecated in modern NumPy -- porting would
    # require updating both.
    if nptsoneside<=1:
        return x
    if binprior>1:
        # Pre-bin the signal by averaging binprior-sized chunks, scaling dx
        # accordingly; the result is linearly re-expanded at the end.
        origlen=len(x)
        x=numpy.array([x[i*binprior:(i+1)*binprior].mean() for i in range(origlen//binprior)])
        dx*=binprior
    side=numpy.uint16(max(nptsoneside, numpy.ceil(order/2.)))
    # Mirror-pad both ends so the window fits at the boundaries.
    s=numpy.r_[2*x[0]-x[side:0:-1],x,2*x[-1]-x[-2:-1*side-2:-1]]
    # a second order polynomal has 3 coefficients
    b = numpy.mat([[k**i for i in range(order+1)] for k in range(-1*side, side+1)])
    m = numpy.linalg.pinv(b).A[deriv] #this gives the dth ? of the base array (.A) of the pseudoinverse of b
    # precompute the offset values for better performance
    offsets = range(-1*side, side+1)
    offset_data = zip(offsets, m)
    # Convolve the padded signal with the SG coefficient row.
    smooth_data=[numpy.array([(weight * s[i + offset]) for offset, weight in offset_data]).sum() for i in xrange(side, len(s) - side)]
    smooth_data=numpy.array(smooth_data)/(dx**deriv)
    if binprior>1:
        # Linearly interpolate the binned result back to the original length.
        ia=numpy.arange(binprior, dtype='float32')/binprior
        xr=numpy.concatenate([ia*(b-a)+a for a, b in zip(smooth_data[:-1], smooth_data[1:])])
        xr=numpy.concatenate([(smooth_data[1]-smooth_data[0])*ia[:binprior//2]+smooth_data[0], xr, (smooth_data[-1]-smooth_data[-2])*ia[:binprior//2]+smooth_data[-1]])
        smooth_data=numpy.concatenate([xr, (smooth_data[-1]-smooth_data[-2])*ia[:origlen-len(xr)]+smooth_data[-1]])
    return smooth_data
class fitfcns:
    """Least-squares fitting helper.

    datatuples are (x1, x2, ..., y) arrays.  After a fit the attributes
    .finalparams, .sigmas, .parnames and .error hold the results; genfit()
    returns a callable fitfcn(x) evaluating the fitted model.

    FIXES relative to the original:
      * lorentzianfit() passed a stray leading `self` to genfit(), shifting
        every argument and duplicating `parnames` -- it always raised
        TypeError.  The stray argument is removed (mirrors gaussianfit()).
      * The custom-optimizer branch of genfit() never cleared performfit,
        so the fit loop never terminated; performfit is now cleared once
        per iteration regardless of branch (leastsq behavior unchanged).
      * scipy.zeros/array/exp/pi were NumPy aliases removed from SciPy;
        the numpy equivalents are used (identical behavior).
      * Python-2-only print statements / backticks replaced with
        version-neutral equivalents.
    """

    def genfit(self, fcn, initparams, datatuple, markstr='unspecified', parnames=None, interaction=0, maxfev=2000, weights=None, optimizerfcn=None):
        """Fit fcn(p, x1[, x2, x3, x4]) to the last array in datatuple.

        datatuple : (x1, ..., y); 1 to 4 independent-variable arrays plus y.
        weights   : None, or 'parabolic' for weight growing quadratically
                    away from the x-range midpoint (1-variable fits only).
        optimizerfcn : optional scalar optimizer used instead of leastsq.
        Returns fitfcn(x) evaluating fcn at the fitted parameters.
        """
        self.maxfev = maxfev
        self.performfit = True
        self.initparams = initparams
        self.sigmas = numpy.zeros(len(initparams))
        # Avoid the original shared mutable default argument for parnames.
        self.parnames = [] if parnames is None else parnames
        self.finalparams = initparams
        self.error = False
        if weights is None:
            def wts(x):
                return 1.
        elif weights == 'parabolic':
            a = (datatuple[0][0] + datatuple[0][-1]) / 2.0
            b = (datatuple[0][-1] - datatuple[0][0]) / 2.0
            def wts(x):
                return 1.0 + ((x - a) / b) ** 2
        # Residual functions for 1..4 independent variables.
        # NOTE: as in the original, only the 1-variable residual is weighted.
        def res1(p, x1, y):
            return (y - fcn(p, x1)) * wts(x1)
        def res2(p, x1, x2, y):
            return y - fcn(p, x1, x2)
        def res3(p, x1, x2, x3, y):
            return y - fcn(p, x1, x2, x3)
        def res4(p, x1, x2, x3, x4, y):
            return y - fcn(p, x1, x2, x3, x4)
        self.resfcn = {1: res1, 2: res2, 3: res3, 4: res4}[len(datatuple) - 1]
        # leastsq REQUIRES float64 input; convert every element.
        datatuple = tuple(numpy.float64(arr) for arr in datatuple)
        while self.performfit:
            self.sigmas = numpy.zeros(len(self.finalparams))
            if optimizerfcn is not None:
                try:
                    self.finalparams = optimizerfcn(self.resfcn, self.initparams, args=datatuple, maxfun=self.maxfev, xtol=1.e-10, ftol=1.e-10)
                    self.error = 0
                except Exception:
                    self.error = 1
            else:
                fitout = scipy.optimize.leastsq(self.resfcn, self.initparams, args=datatuple, maxfev=self.maxfev, full_output=1)
                self.finalparams = fitout[0]
                if not fitout[4] in [1, 2]:
                    print('Fitting Error %s at %s: %s' % (fitout[4], markstr, fitout[3]))
                    self.error = True
                else:
                    self.covmat = fitout[1]
                    try:
                        # Diagonal of the covariance matrix; covmat may be None.
                        self.sigmas = numpy.array([self.covmat[i, i] for i in range(len(self.sigmas))])
                    except Exception:
                        pass
            # BUG FIX: terminate the loop for both branches (previously only
            # the leastsq branch cleared performfit).
            self.performfit = False

        def fitfcn(x):
            return fcn(self.finalparams, x)
        return fitfcn

    def poly(self, p, x):
        """Evaluate sum_i p[i] * x**i (p and x are numpy arrays)."""
        return numpy.array([p[i] * (x ** i) for i in range(p.size)]).sum(0)

    def polyfit(self, datatuple, initparams, markstr='unspecified', interaction=0, maxfev=2000, weights=None):
        """Fit a polynomial.

        initparams is either a coefficient array [constant, linear, ...] or
        an int giving the polynomial order (coefficients start at ones).
        """
        if isinstance(initparams, int):
            initparams = numpy.ones(initparams + 1)
        else:
            initparams = numpy.float64(initparams)
        # 'coef0', 'coef1', ... (Python-2 backticks replaced by repr()).
        parnames = ['coef' + repr(i) for i in range(len(initparams))]
        return self.genfit(self.poly, initparams, datatuple, markstr, parnames, interaction, maxfev, weights=weights)

    def gaussianfit(self, datatuple, initparams=numpy.array([1, 0, 1]), markstr='unspecified', interaction=0, showplot=True, maxfev=2000, weights=None):
        """Fit a Gaussian; parameters are (coef, center, sigma)."""
        return self.genfit(self.gaussian, initparams, datatuple, markstr, parnames=['coef', 'center', 'sigma'], interaction=interaction, maxfev=maxfev, weights=weights)

    def gaussian(self, p, x):
        return p[0] * numpy.exp(-0.5 * ((x - p[1]) / p[2]) ** 2)

    def lorentzianfit(self, datatuple, initparams=numpy.array([1, 0, 1]), markstr='unspecified', interaction=0, showplot=True, maxfev=2000, weights=None):
        """Fit a Lorentzian; parameters are (coef, center, gamma).

        BUG FIX: the original passed an extra leading `self` into genfit(),
        which shifted every positional argument and always raised TypeError.
        """
        return self.genfit(self.lorentzian, initparams, datatuple, markstr, parnames=['coef', 'center', 'gamma'], interaction=interaction, maxfev=maxfev, weights=weights)

    def lorentzian(self, p, x):
        return (p[0] / numpy.pi) * p[2] / ((x - p[1]) ** 2 + p[2] ** 2)
def Gaussian(pars, x):
    """Gaussian peak: pars = (center, sigma, peak height)."""
    center, sigma, height = pars[0], pars[1], pars[2]
    return height * numpy.exp(-0.5 * ((x - center) / sigma) ** 2)
def Lorentzian(pars, x):
    """Lorentzian peak, parameterized so pars[2] is the peak height.

    pars = (center, half-width, peak height).
    """
    center, width, height = pars[0], pars[1], pars[2]
    return height / (1 + ((x - center) / width) ** 2)
def GaussLorentz(pars, x):
    """Convex Gaussian/Lorentzian mix; pars[3] is the Gaussian weight,
    clipped into [0, 1] before mixing."""
    weight = min(max(pars[3], 0.), 1.)
    return weight * Gaussian(pars, x) + (1. - weight) * Lorentzian(pars, x)
def GaussHalfLorentz(pars, x):
    """Fixed 50/50 Gaussian/Lorentzian mix with shared peak parameters."""
    gauss_part = Gaussian(pars, x)
    lorentz_part = Lorentzian(pars, x)
    return .5 * gauss_part + .5 * lorentz_part
# Name -> peak-shape callable lookup for selecting a shape by string.
# NOTE(review): GaussLorentz (the variable-weight mix) is not registered
# here -- confirm whether that is intentional.
PeakFcnLibrary={'Gaussian':Gaussian, 'Lorentzian':Lorentzian, 'GaussHalfLorentz':GaussHalfLorentz}
def fitpeakset(X, Y, initpars, peakfcn, negpeaks=True, optimizerfcn=None, nsigcut=3.):#peak function must be a function that accepts a list of 3 parameters (the reshape 3 needs to be changed if num params differs)
    # Fit a sum of peakfcn(pars, x) peaks to (X, Y).
    # initpars: array of (position, width, height) triples (see the p, w, h
    # unpacking below).  Returns (fitted params, sigmas, residual norm).
    numgauss=len(initpars)
    if numgauss==0:
        return (numpy.float32([]), numpy.float32([]), 0.)
    if nsigcut is None:
        imin=0
        imax=len(X)
    else:
        # Restrict the fit window to within nsigcut widths of any peak.
        xmin=initpars[0][0]
        xmax=initpars[0][0]
        for p, w, h in initpars:
            xmin=min(xmin, p-w*nsigcut)
            xmax=max(xmax, p+w*nsigcut)
        imin=numpy.argmin((X-xmin)**2)
        imax=numpy.argmin((X-xmax)**2)
    zeroedpeakinds=[]
    repeatwithpkremoved=True #peaks are removed if their fitted height is <0. At the end, these peaks are added to the fit parameter list with 0 height and 0 error
    while repeatwithpkremoved:
        initparscpy=copy.copy(list(initpars))
        for pkind in reversed(zeroedpeakinds):#reverse so pop gets the right index
            initparscpy.pop(pkind)
        if len(initparscpy)==0:
            break
        initparsflat=numpy.float64(initparscpy).flatten()
        # Model: sum of peakfcn over the flattened parameter triples.
        def fitfcn(p, x):
            allpars=numpy.reshape(p, (p.size//initpars.shape[1], initpars.shape[1]))
            if isinstance(x, numpy.ndarray):
                val=numpy.zeros(x.size, dtype='float32')
            else:
                val=0.0
            for pars in allpars:
                val+=peakfcn(pars, x)
            return val
#        def residfcn(p, x, y):
#            err=y-fitfcn(p, x)
#            return err
        # Fit only within the window, in float64 as genfit requires.
        Ya=numpy.float64(Y[imin:imax])
        Xa=numpy.float64(X[imin:imax])
        #if not optimizerfcn is None:
        ff=fitfcns()
        ff.genfit(fitfcn, initparsflat, (Xa, Ya), optimizerfcn=optimizerfcn)
        finalparams=ff.finalparams
#        else:
#            fitout=scipy.optimize.leastsq(residfcn, initparsflat, args=(X, Y), full_output=1)
#            if not (fitout[4] in [1, 2]):
#                print 'Fitting Error', fitout[4],': ', fitout[3]
#            finalparams=numpy.float32(fitout[0])
        finalparamsshaped=numpy.reshape(finalparams, (len(finalparams)//initpars.shape[1], initpars.shape[1]))
        if negpeaks:
            repeatwithpkremoved=False
        else:
            # Drop negative-height peaks and refit without them.
            negpeakinds=numpy.where(finalparamsshaped[:, 2]<0)[0]
            zeroedpeakinds+=list(negpeakinds)
            zeroedpeakinds.sort()
            repeatwithpkremoved=len(negpeakinds)>0
#    print '^^^^^^^^^^^^^^^'
#    print initparsflat
#    print finalparamsshaped
#    pylab.plot(X, Y, 'b.')
#    pylab.show()
#    if not (fitout[1] is None):
#        covmat=fitout[1]
#        sigmas=numpy.float32([covmat[i, i] for i in range(len(finalparams))])
#    else:
#        print 'COVARIANCE NOT CALCULATED:', fitout[4],': ', fitout[3]
#        sigmas=numpy.zeros(len(finalparams), dtype='float32')
    sigmas=ff.sigmas
    # Residual norm is evaluated over the FULL X, Y range, not just the window.
    finalresid=numpy.sqrt((ff.resfcn(finalparams, X, Y)**2).sum())
    #pylab.plot(X, Y, 'k.', X, fitfcn(finalparams, X), 'r-')
    sigmashaped=numpy.reshape(sigmas, (len(finalparams)//initpars.shape[1], initpars.shape[1]))
    # Re-insert any removed peaks with zero height and zero sigma.
    for pkind in zeroedpeakinds:
        finalparamsshaped=list(finalparamsshaped)
        sigmashaped=list(sigmashaped)
        temp=copy.copy(initpars[pkind][:])
        temp[2]=0.#zero the height
        finalparamsshaped.insert(pkind, temp)
        sigmashaped.insert(pkind, numpy.zeros(initpars.shape[1], dtype='float64'))
    finalparamsshaped=numpy.float64(finalparamsshaped)
    sigmashaped=numpy.float64(sigmashaped)
    return (finalparamsshaped, sigmashaped, finalresid)
def arrayzeroind1d(arr, postoneg=False, negtopos=True):
    """Locate sign changes in a 1-D array.

    Returns fractional "indices" obtained by linear interpolation between the
    two samples straddling each zero crossing.  postoneg selects strictly
    positive-to-negative crossings; negtopos selects any crossing where the
    sign product is <= 0.
    NOTE(review): with both flags True the <=0 test re-reports the
    pos-to-neg crossings, so duplicates can appear -- confirm callers expect that.
    """
    signs = numpy.sign(arr)
    if postoneg:
        crossings = numpy.where(signs[:-1] > signs[1:])[0]
        if negtopos:
            crossings = numpy.append(crossings, numpy.where(signs[:-1] * signs[1:] <= 0)[0])
    else:
        # assume that if not postoneg then negtopos
        crossings = numpy.where(signs[:-1] * signs[1:] <= 0)[0]
    lo = arr[(crossings,)]
    hi = arr[(crossings + 1,)]
    # Linear interpolation of the zero position between index i and i+1.
    return (1.0 * crossings * hi - (crossings + 1) * lo) / (hi - lo)
def clustercoordsbymax1d(arr, pkind, critsepind):
    """Thin peak indices that are closer together than critsepind.

    pkind is sorted in place; while any adjacent pair is too close, the
    neighbor of the tallest offending peak (by arr value, NaN-safe) is
    deleted.  The surviving indices are returned sorted.
    """
    pkind.sort()
    tooclose_lo = numpy.where((pkind[1:] - pkind[:-1]) < critsepind)[0]
    tooclose_hi = tooclose_lo + 1
    while tooclose_lo.size > 0:
        # Tallest peak among the lower / upper members of offending pairs.
        best_lo = numpy.nanargmax(arr[pkind[(tooclose_lo,)]])
        best_hi = numpy.nanargmax(arr[pkind[(tooclose_hi,)]])
        if arr[pkind[tooclose_lo[best_lo]]] > arr[pkind[tooclose_hi[best_hi]]]:
            # Keep the tall lower peak; drop its upper neighbor.
            pkind = numpy.delete(pkind, tooclose_hi[best_lo])
        else:
            # Keep the tall upper peak; drop its lower neighbor.
            pkind = numpy.delete(pkind, tooclose_lo[best_hi])
        tooclose_lo = numpy.where((pkind[1:] - pkind[:-1]) < critsepind)[0]
        tooclose_hi = tooclose_lo + 1
    return pkind
def peaksearch1dSG(x, dx=1., critpeakheight=10, critsepind=5, critcurve=None, firstdernpts=7, firstderorder=1, secdernpts=14, secderorder=1, pospeaks=True, negpeaks=True):
    #dx is delta q for one index. zeros of the first derivative of inn are grouped together if within critsepind. only negative slope in the firstder is used so no secder is necessary unless specify a critical curvature in count nm^2
    # Peak search via zeros of the Savitzky-Golay first derivative; peaks
    # closer than critsepind are merged by clustercoordsbymax1d, and an
    # optional curvature threshold (critcurve) filters on the 2nd derivative.
    if not (pospeaks or negpeaks):
        return numpy.float32([])
    ifirstder=savgolsmooth(x, nptsoneside=firstdernpts, order=firstderorder, dx=dx, deriv=1)
    fullpkind=numpy.float32([])
    if pospeaks:
        # Maxima: derivative crosses + -> -, and the value exceeds the threshold.
        zeroind=arrayzeroind1d(ifirstder, postoneg=True, negtopos=False)
        temp=numpy.where(x[(numpy.uint32(numpy.round(zeroind)),)]>critpeakheight)
        fullpkind=numpy.append(fullpkind, zeroind[temp])
    if negpeaks:
        # Minima: derivative crosses - -> +, and the value is below -threshold.
        zeroind=arrayzeroind1d(ifirstder, postoneg=False, negtopos=True)
        temp=numpy.where(x[(numpy.uint32(numpy.round(zeroind)),)]<(-1*critpeakheight))
        fullpkind=numpy.append(fullpkind, zeroind[temp])
    if fullpkind.size==0:
        return fullpkind
    pkind=clustercoordsbymax1d(x, numpy.uint32(numpy.round(fullpkind)), critsepind)
    if critcurve is not None:
        isecder=savgolsmooth(x, nptsoneside=secdernpts, order=secderorder, dx=dx, deriv=2)
        temp=numpy.where(numpy.abs(isecder[(numpy.uint32(numpy.round(pkind)),)])>(critcurve))
        pkind=numpy.array(pkind)[temp]
#    pkind=list(pkind)
#    pkind.reverse()#highest to smallest for pairing below
    return numpy.array(pkind, dtype=numpy.float32)
def removeoutliers_meanstd(arr, nptsoneside, nsig, gapptsoneside=0): #avrages maximum of 2*nptoneside points and usees distance from mean scaled by std compared to nsig to determine if the value should be replaced by the mean. if gapptsoneside>0, will do this leaving a gap around the point in question and using nptsoneside-gaps points for the mean and std
    # NOTE(review): exact duplicate of the removeoutliers_meanstd defined
    # earlier in this module; this later copy is the one bound at import
    # time -- confirm and remove one of them.
    if nptsoneside==1 and gapptsoneside==0:
        return removesinglepixoutliers(arr, critratiotoneighbors=nsig)
    nsig=max(nsig, 1.)
    nptsoneside=max(nptsoneside, 2.)
    gapptsoneside=min(gapptsoneside, nptsoneside-2.)
    for gap in range(gapptsoneside+1):
        starti=numpy.uint32([max(i-(nptsoneside-gap), 0) for i in range(len(arr))])
        stopi=numpy.uint32([min(i+(nptsoneside-gap)+1, len(arr)) for i in range(len(arr))])
        #print [numpy.append(arr[i0:i], arr[i+1:i1]) for i, i0, i1 in zip(range(len(arr)), starti, stopi)][8]
        #print [(((numpy.append(arr[i0:i], arr[i+1:i1]).mean()-arr[i]))**2, (numpy.append(arr[i0:i], arr[i+1:i1]).std()*nsig)**2) for i, i0, i1 in zip(range(len(arr)), starti, stopi)][8]
        arr=numpy.array([(((numpy.append(arr[i0:i], arr[i+1:i1]).mean()-arr[i]))**2<(numpy.append(arr[i0:i], arr[i+1:i1]).std()*nsig)**2 and (arr[i],) or (numpy.append(arr[i0:i], arr[i+1:i1]).mean(),))[0] for i, i0, i1 in zip(range(len(arr)), starti, stopi)], dtype=arr.dtype)
    return arr
def removesinglepixoutliers(arr, critratiotoneighbors=1.5):
    """Replace isolated single-point spikes with the mean of their neighbors.

    A point (excluding the two edge points) is a spike when it exceeds
    critratiotoneighbors times BOTH neighbors.  arr is modified in place
    and also returned.
    """
    is_spike = (arr[1:-1] > (critratiotoneighbors * arr[:-2])) * (arr[1:-1] > (critratiotoneighbors * arr[2:]))
    spike_idx = numpy.where(is_spike)[0] + 1
    #print len(spike_idx), ' pixels being replaced'
    arr[spike_idx] = (arr[spike_idx - 1] + arr[spike_idx + 1]) / 2
    return arr
def clustercoordsbymax1d(arr, pkind, critqsepind):#results will be sorted. wherever there are peak indeces too close together. the peak index next to the peak index with highest arr value gets removed
    # NOTE(review): redefinition of the clustercoordsbymax1d declared earlier
    # in this module; this later copy shadows it at import time and uses
    # argmax instead of nanargmax (NaN values in arr change the outcome) --
    # confirm which variant is intended.
    pkind.sort()
    indindslow=numpy.where((pkind[1:]-pkind[:-1])<critqsepind)[0]
    indindshigh=indindslow+1
    while indindslow.size>0:
        # Tallest peak among the lower/upper members of too-close pairs.
        maxindindindlow=numpy.argmax(arr[pkind[(indindslow,)]])
        maxindindindhigh=numpy.argmax(arr[pkind[(indindshigh,)]])
        if arr[pkind[indindslow[maxindindindlow]]]>arr[pkind[indindshigh[maxindindindhigh]]]:
            pkind=numpy.delete(pkind, indindshigh[maxindindindlow])
        else:
            pkind=numpy.delete(pkind, indindslow[maxindindindhigh])
        indindslow=numpy.where((pkind[1:]-pkind[:-1])<critqsepind)[0]
        indindshigh=indindslow+1
    return pkind
def findlinearsegs(y, dydev_frac, dydev_nout, dn_segstart, SGnpts=10, plotbool=False, dx=1., dydev_abs=0., maxfracoutliers=.5, critdy_fracmaxdy=None, critdy_abs=None, npts_SGconstdy=2):
    # Find runs of approximately constant slope ("linear segments") in y.
    # Returns (istart_segs, len_segs, fitdy_segs, fitinterc_segs, dy) where a
    # 1st-order polynomial is fit over each detected segment.
    # NOTE: Python 2 code (print statement below).
    if 2*npts_SGconstdy+dydev_nout>=len(y):
        print 'array not long enough to find linear segments'
        return [], [], [], [], []
    # Smoothed first derivative of y.
    dy=savgolsmooth(y, nptsoneside=SGnpts, order = 2, dx=dx, deriv=1)
#    lenconstdy=numpy.array([(dy[i]==0. and (0, ) or \
#        (numpy.all((numpy.abs((dy[i:]-dy[i])/dy[i])<=dydev_frac)|(numpy.abs(dy[i:]-dy[i])<=dydev_abs)) and (len(dy)-i, ) or \
#        (numpy.where(numpy.logical_not((numpy.abs((dy[i:]-dy[i])/dy[i])<=dydev_frac)|(numpy.abs(dy[i:]-dy[i])<=dydev_abs)))[0][:dydev_nout][-1],)))[0] for i in range(len(dy)-dydev_nout)])
    # For each index i: length of the run over which dy stays within
    # max(|dy[i]|*dydev_frac, dydev_abs) of dy[i], allowing dydev_nout outliers.
    lenconstdy=numpy.array([(dy[i]==0. and (0, ) or \
        (numpy.all(numpy.abs(dy[i:]-dy[i])<max(numpy.abs(dy[i]*dydev_frac), dydev_abs)) and (len(dy)-i, ) or \
        (numpy.where(numpy.logical_not(numpy.abs(dy[i:]-dy[i])<max(numpy.abs(dy[i]*dydev_frac), dydev_abs)))[0][:dydev_nout][-1],)))[0] for i in range(len(dy)-dydev_nout)])
    #print len(y), len(lenconstdy), lenconstdy.max()
    if len(lenconstdy)==0:
        len_segs=[]
        istart_segs=[]
    else:
        # Segment starts = local maxima of the run-length curve, found via
        # sign changes of its smoothed derivative.
        lendn=savgolsmooth(numpy.float32(lenconstdy), nptsoneside=npts_SGconstdy, order = 1, dx=1.0, deriv=1, binprior=0)
        if plotbool:
            pylab.figure()
            pylab.plot(lenconstdy)
            pylab.ylabel('initial len of consecutive "constant dy" points')
            pylab.figure()
            pylab.plot(lendn)
            pylab.ylabel('lendn = deriv of len of const dy pts')
        istart_segs=numpy.where((lendn[:-1]>0)&(lendn[1:]<0))[0]
        # A maximum at the very start of the array is missed by the
        # derivative test; add it explicitly.
        if numpy.any(lenconstdy[:npts_SGconstdy+1]>=lenconstdy[npts_SGconstdy+1]):
            itemp=numpy.argmax(lenconstdy[:npts_SGconstdy])
            if not itemp in istart_segs:
                istart_segs=numpy.append(itemp, istart_segs)
        # Clamp starts away from the edges, then snap each to the local
        # argmax of the run-length curve.
        istart_segs[(istart_segs<npts_SGconstdy*2)]=npts_SGconstdy*2
        istart_segs[(istart_segs>len(y)-1-npts_SGconstdy*2)]=len(y)-1-npts_SGconstdy*2
        istart_segs+=numpy.array([numpy.argmax(lenconstdy[i-npts_SGconstdy*2:i+npts_SGconstdy*2]) for i in istart_segs])-npts_SGconstdy*2
        istart_segs=numpy.array(clustercoordsbymax1d(lenconstdy, istart_segs, dn_segstart))
        # Keep only segments long enough to respect maxfracoutliers.
        istart_segs=istart_segs[lenconstdy[istart_segs]>=dydev_nout/maxfracoutliers]
        #
        #istart=numpy.array([i for i in istart if (numpy.abs(i-istart)<dn_segstart).sum()==1 or numpy.all(lenconstdy[numpy.abs(i-istart)<dn_segstart]<=lenconstdy[i])])
        #istart=numpy.array([((numpy.abs(i-istart)<dn_segstart).sum()==1 and (i,) or (numpy.median(, ))[0] for i in istart)
        len_segs=lenconstdy[istart_segs]
    if not critdy_abs is None:
        critdy_fracmaxdy=critdy_abs/numpy.abs(dy).max()
    if not critdy_fracmaxdy is None:
        # Also include zero-slope ("constant") segments found by findzerosegs,
        # skipping any that duplicate an already-found segment start.
        istart_constsegs, len_constsegs, garb, garb=findzerosegs(dy, critdy_fracmaxdy, dydev_nout, dn_segstart, SGnpts=SGnpts, plotbool=plotbool, dx=1., maxfracoutliers=maxfracoutliers)
#        if len(istart_constsegs)>0:
#            print istart_constsegs, len_constsegs
        temp=[[i, l] for i, l in zip(istart_constsegs, len_constsegs) if numpy.min((istart_segs-i)**2)>dn_segstart**2]
        if len(temp)>0:
            istart_constsegs, len_constsegs=numpy.array(temp).T
        istart_segs=numpy.append(istart_segs, istart_constsegs)
        len_segs=numpy.append(len_segs, len_constsegs)
    if len(istart_segs)==0:
        return numpy.array([]), numpy.array([]), numpy.array([]), numpy.array([]), dy
    #dy_segs=numpy.array([dy[i:i+l].mean() for i, l in zip(istart_segs, len_segs)])
    #interc_segs=numpy.array([(y[i:i+l]-(i+numpy.arange(l))*d).mean() for i, l, d in zip(istart_segs, len_segs, dy_segs)])
    # Linear fit (slope, intercept) over each segment.
    fitdy_segs, fitinterc_segs=numpy.array([numpy.polyfit(dx*(i+numpy.arange(l)), y[i:i+l], 1) for i, l in zip(istart_segs, len_segs)]).T
    if plotbool:
        pylab.figure()
        pylab.plot(y)
        cols=['g', 'c', 'y', 'm']
        for count, (i, l, fd, fy0) in enumerate(zip(istart_segs, len_segs, fitdy_segs, fitinterc_segs)):
            if l==max(len_segs):
                c='r'
            else:
                c=cols[count%len(cols)]
            pylab.plot(i, fy0+i*fd*dx, 'g*')
            #pylab.plot(i+numpy.arange(l),numpy.arange(l)*d+y0, 'g-')
            pylab.plot(i+numpy.arange(l),(i+numpy.arange(l))*fd*dx+fy0, '-', color=c, lw=2)
        pylab.figure()
        pylab.plot(dy)
        pylab.ylabel('dy')
        for count, (i, l, fd) in enumerate(zip(istart_segs, len_segs, fitdy_segs)):
            if l==max(len_segs):
                c='r'
            else:
                c=cols[count%len(cols)]
            pylab.plot(i, fd, 'g*')
            pylab.plot(i+numpy.arange(l),numpy.ones(l)*fd, '-', color=c, lw=2)
        pylab.figure()
        pylab.plot(lenconstdy, 'ko')
        pylab.plot(istart_segs, len_segs, 'r.')
        pylab.ylabel('len of consecutive "constant dy" points ')
    return istart_segs, len_segs, fitdy_segs, fitinterc_segs, dy # fit intercept is wrt the beginning of the array, index=0 not x=0
def findmatchinglinearsegs(y, dydev_frac, dydev_nout, dn_segstart, SGnpts=10, plotbool=False, dx=1., dydev_abs=0., maxfracoutliers=.5, critdy_fracmaxdy=None, critdy_abs=None):
    """Find approximately-linear segments of y (runs of near-constant
    smoothed derivative) and fit a line to each.

    Returns (istart_segs, len_segs, fitdy_segs, fitinterc_segs, dy);
    the fit intercept is wrt the beginning of the array (index=0), not x=0.
    dydev_nout is the number of outliers allowed within a segment and
    dn_segstart is the minimum index separation between segment starts.
    """
    # Savitzky-Golay smoothed first derivative of y.
    dy=savgolsmooth(y, nptsoneside=SGnpts, order = 2, dx=dx, deriv=1)
    # NOTE(review): lenconstdy is used below (smoothing, clustering and the
    # diagnostic plot) but its computation is commented out here, so as
    # written this function raises NameError unless lenconstdy exists in an
    # enclosing scope.  Presumably the commented block (length of the run of
    # "constant dy" points starting at each index) was meant to be active --
    # confirm before relying on this function.
    # lenconstdy=numpy.array([(numpy.any(dyall[:,i]==0.) and (0,) or \
    #    (numpy.all([(numpy.abs((dy[i:]-dyall[:,i].mean())/dyall[:,i].mean())<=dydev_frac)|(numpy.abs(dy[i:]-dy[i])<=dydev_abs)) and (len(dy)-i, ) or \
    #    (numpy.where(numpy.logical_not((numpy.abs((dy[i:]-dy[i])/dy[i])<=dydev_frac)|(numpy.abs(dy[i:]-dy[i])<=dydev_abs)))[0][:dydev_nout][-1],)))[0] for i in range(len(dy)-dydev_nout)])
    nptstemp=2
    # Derivative of the run-length profile: + to - zero crossings of lendn
    # mark local maxima of lenconstdy, i.e. candidate segment starts.
    lendn=savgolsmooth(numpy.float32(lenconstdy), nptsoneside=nptstemp, order = 1, dx=1.0, deriv=1, binprior=0)
    istart_segs=numpy.where((lendn[:-1]>0)&(lendn[1:]<0))[0]
    # Clamp candidate starts away from the array edges ...
    istart_segs[(istart_segs<nptstemp*2)]=nptstemp*2
    istart_segs[(istart_segs>len(y)-1-nptstemp*2)]=len(y)-1-nptstemp*2
    # ... then snap each candidate onto the local maximum of lenconstdy.
    istart_segs+=numpy.array([numpy.argmax(lenconstdy[i-nptstemp*2:i+nptstemp*2]) for i in istart_segs])-nptstemp*2
    # Merge candidates closer than dn_segstart, keeping the strongest.
    istart_segs=numpy.array(clustercoordsbymax1d(lenconstdy, istart_segs, dn_segstart))
    # Discard segments whose run length implies too many outliers.
    istart_segs=istart_segs[lenconstdy[istart_segs]>=dydev_nout/maxfracoutliers]
    #
    #istart=numpy.array([i for i in istart if (numpy.abs(i-istart)<dn_segstart).sum()==1 or numpy.all(lenconstdy[numpy.abs(i-istart)<dn_segstart]<=lenconstdy[i])])
    #istart=numpy.array([((numpy.abs(i-istart)<dn_segstart).sum()==1 and (i,) or (numpy.median(, ))[0] for i in istart)
    len_segs=lenconstdy[istart_segs]
    # Optionally also pick up "zero derivative" segments from findzerosegs
    # and append any that are not within dn_segstart of an existing start.
    if not critdy_abs is None:
        critdy_fracmaxdy=critdy_abs/numpy.abs(dy).max()
    if not critdy_fracmaxdy is None:
        istart_constsegs, len_constsegs, garb, garb=findzerosegs(dy, critdy_fracmaxdy, dydev_nout, dn_segstart, SGnpts=SGnpts, plotbool=plotbool, dx=1., maxfracoutliers=maxfracoutliers)
        # if len(istart_constsegs)>0:
        #    print istart_constsegs, len_constsegs
        temp=[[i, l] for i, l in zip(istart_constsegs, len_constsegs) if numpy.min((istart_segs-i)**2)>dn_segstart**2]
        if len(temp)>0:
            istart_constsegs, len_constsegs=numpy.array(temp).T
            istart_segs=numpy.append(istart_segs, istart_constsegs)
            len_segs=numpy.append(len_segs, len_constsegs)
    if len(istart_segs)==0:
        return numpy.array([]), numpy.array([]), numpy.array([]), numpy.array([]), dy
    #dy_segs=numpy.array([dy[i:i+l].mean() for i, l in zip(istart_segs, len_segs)])
    #interc_segs=numpy.array([(y[i:i+l]-(i+numpy.arange(l))*d).mean() for i, l, d in zip(istart_segs, len_segs, dy_segs)])
    # Least-squares line fit over each segment; intercept is wrt index 0.
    fitdy_segs, fitinterc_segs=numpy.array([numpy.polyfit(dx*(i+numpy.arange(l)), y[i:i+l], 1) for i, l in zip(istart_segs, len_segs)]).T
    if plotbool:
        # Diagnostics: y with fitted lines, dy with fitted slopes, and the
        # lenconstdy profile with the chosen segment starts.
        pylab.figure()
        pylab.plot(y)
        cols=['g', 'c', 'y', 'm']
        for count, (i, l, fd, fy0) in enumerate(zip(istart_segs, len_segs, fitdy_segs, fitinterc_segs)):
            if l==max(len_segs):
                c='r'  # the longest segment is highlighted in red
            else:
                c=cols[count%len(cols)]
            pylab.plot(i, fy0+i*fd*dx, 'g*')
            #pylab.plot(i+numpy.arange(l),numpy.arange(l)*d+y0, 'g-')
            pylab.plot(i+numpy.arange(l),(i+numpy.arange(l))*fd*dx+fy0, '-', color=c, lw=2)
        pylab.figure()
        pylab.plot(dy)
        pylab.ylabel('dy')
        for count, (i, l, fd) in enumerate(zip(istart_segs, len_segs, fitdy_segs)):
            if l==max(len_segs):
                c='r'
            else:
                c=cols[count%len(cols)]
            pylab.plot(i, fd, 'g*')
            pylab.plot(i+numpy.arange(l),numpy.ones(l)*fd, '-', color=c, lw=2)
        pylab.figure()
        pylab.plot(lenconstdy, 'ko')
        pylab.plot(istart_segs, len_segs, 'r.')
        pylab.ylabel('len of consecutive "constant dy" points ')
    return istart_segs, len_segs, fitdy_segs, fitinterc_segs, dy # fit intercept is wrt the beginning of the array, index=0 not x=0
def findzerosegs(y, yzero_maxfrac, ydev_nout, dn_segstart, SGnpts=10, plotbool=False, dx=1., maxfracoutliers=.5):#ydev_nout is number of outliers allowed in segment, dn_segstart is how close to each other the segments are allowed to start
    """Find segments where the smoothed y stays near zero (within
    yzero_maxfrac of max|y|) and fit a line to each such segment.

    Returns (istart_segs, len_segs, fitdy_segs, fitinterc_segs); the fit
    intercept is wrt the beginning of the array (index=0), not x=0.
    """
    if ydev_nout>=len(y):
        # Not enough points to tolerate ydev_nout outliers.
        print 'array not long enough to find zero segments'
        return [], [], [], []
    y=savgolsmooth(y, nptsoneside=SGnpts, order = 2)
    # Convert the fractional threshold into an absolute one.
    yzero_maxfrac=numpy.abs(y).max()*yzero_maxfrac
    # lenzeroy[i] = length of the run of "near zero" points starting at i,
    # allowing up to ydev_nout outliers.
    lenzeroy=numpy.array([\
        (numpy.all(numpy.abs(y[i:])<=yzero_maxfrac) and (len(y)-i, ) or \
        (numpy.where(numpy.abs(y[i:])>yzero_maxfrac)[0][:ydev_nout][-1],))[0] for i in range(len(y)-ydev_nout)])
    nptstemp=2
    # Derivative of the run-length profile: + to - zero crossings of lendn
    # mark local maxima of lenzeroy, i.e. candidate segment starts.
    lendn=savgolsmooth(numpy.float32(lenzeroy), nptsoneside=nptstemp, order = 1, dx=1.0, deriv=1, binprior=0)
    istart_segs=numpy.where((lendn[:-1]>0)&(lendn[1:]<0))[0]
    # A maximum right at the start of the array has no preceding rise, so it
    # would otherwise be missed.
    if numpy.any(lenzeroy[:nptstemp+1]>=lenzeroy[nptstemp+1]):
        itemp=numpy.argmax(lenzeroy[:nptstemp])
        if not itemp in istart_segs:
            istart_segs=numpy.append(itemp, istart_segs)
    # Clamp candidate starts away from the array edges, then snap each one
    # onto the local maximum of lenzeroy.
    istart_segs[(istart_segs<nptstemp*2)]=nptstemp*2
    istart_segs[(istart_segs>len(y)-1-nptstemp*2)]=len(y)-1-nptstemp*2
    istart_segs+=numpy.array([numpy.argmax(lenzeroy[i-nptstemp*2:i+nptstemp*2]) for i in istart_segs])-nptstemp*2
    # Merge candidates closer than dn_segstart, keeping the strongest.
    istart_segs=numpy.array(clustercoordsbymax1d(lenzeroy, istart_segs, dn_segstart))
    # Discard segments whose run length implies too many outliers.
    istart_segs=istart_segs[lenzeroy[istart_segs]>ydev_nout/maxfracoutliers]
    if len(istart_segs)==0:
        return numpy.array([]), numpy.array([]), numpy.array([]), numpy.array([])
    #istart=numpy.array([i for i in istart if (numpy.abs(i-istart)<dn_segstart).sum()==1 or numpy.all(lenconstdy[numpy.abs(i-istart)<dn_segstart]<=lenconstdy[i])])
    #istart=numpy.array([((numpy.abs(i-istart)<dn_segstart).sum()==1 and (i,) or (numpy.median(, ))[0] for i in istart)
    len_segs=lenzeroy[istart_segs]
    #dy_segs=numpy.array([dy[i:i+l].mean() for i, l in zip(istart_segs, len_segs)])
    #interc_segs=numpy.array([(y[i:i+l]-(i+numpy.arange(l))*d).mean() for i, l, d in zip(istart_segs, len_segs, dy_segs)])
    # Least-squares line fit over each segment; intercept is wrt index 0.
    fitdy_segs, fitinterc_segs=numpy.array([numpy.polyfit(dx*(i+numpy.arange(l)), y[i:i+l], 1) for i, l in zip(istart_segs, len_segs)]).T
    if plotbool:
        # Diagnostics: smoothed y with fitted lines, and the run-length
        # profile with the chosen segment starts.
        pylab.figure()
        pylab.plot(y)
        cols=['g', 'c', 'y', 'm']
        for count, (i, l, fd, fy0) in enumerate(zip(istart_segs, len_segs, fitdy_segs, fitinterc_segs)):
            if l==max(len_segs):
                c='r'  # the longest segment is highlighted in red
            else:
                c=cols[count%len(cols)]
            pylab.plot(i, fy0+i*fd*dx, 'g*')
            #pylab.plot(i+numpy.arange(l),numpy.arange(l)*d+y0, 'g-')
            pylab.plot(i+numpy.arange(l),(i+numpy.arange(l))*fd*dx+fy0, '-', color=c, lw=2)
        for count, (i, l, fd) in enumerate(zip(istart_segs, len_segs, fitdy_segs)):
            if l==max(len_segs):
                c='r'
            else:
                c=cols[count%len(cols)]
            pylab.plot(i, fd, 'g*')
            pylab.plot(i+numpy.arange(l),numpy.ones(l)*fd, '-', color=c, lw=2)
        pylab.figure()
        pylab.plot(lenzeroy, 'ko')
        pylab.plot(istart_segs, lenzeroy[istart_segs], 'r.')
        pylab.ylabel('len of consecutive "zero" points')
    return istart_segs, len_segs, fitdy_segs, fitinterc_segs # fit intercept is wrt the beginning of the array, index=0 not x=0
def reggrid(x, y):
    """Return (x, y) with x guaranteed to be a regularly spaced grid.

    If x is already evenly spaced it is returned unchanged; otherwise y is
    linearly interpolated onto a regular grid spanning [x.min(), x.max()]
    with the same number of points.

    Bug fix: the original compared x[:-1]-x[1:] (the NEGATED forward
    differences) against x[1]-x[0], which can never match for a monotonic
    grid, so every input -- regular or not -- was pushed through the
    interpolation branch.  Comparing the forward differences x[1:]-x[:-1]
    restores the intended short-circuit for already-regular grids.
    """
    if numpy.all((x[1:] - x[:-1]) == (x[1] - x[0])):
        # Evenly spaced already: nothing to do.
        return x, y
    xg = numpy.linspace(x.min(), x.max(), len(x))
    yg = interp(xg, x, y)
    return xg, yg
def calcmeandt_dlist(dlist):
    """Store the mean sample interval under 'dt' for every dict in dlist."""
    for record in dlist:
        times = record['t(s)']
        record['dt'] = numpy.mean(times[1:] - times[:-1])
def calcmeandEdt_dlist(dlist):
    """Store mean time step 'dt', mean |dE| 'dE' and their ratio 'dEdt'
    for every dict in dlist."""
    for record in dlist:
        times = record['t(s)']
        potential = record['Ewe(V)']
        record['dt'] = numpy.mean(times[1:] - times[:-1])
        record['dE'] = numpy.mean(numpy.abs(potential[1:] - potential[:-1]))
        record['dEdt'] = record['dE'] / record['dt']
def calcsegind_dlist(dlist, SGpts=8):
    """Split each dict's Ewe(V) trace into rising/falling segments based on
    the sign of the smoothed dE/dt, recording a per-point segment index
    ('segind') and per-segment properties ('segprops_dlist')."""
    if not 'dt' in dlist[0].keys():
        calcmeandt_dlist(dlist)
    for d in dlist:
        # Smoothed time derivative of the potential.
        d['Ewe(V)_dtSG']=savgolsmooth(d['Ewe(V)'], nptsoneside=SGpts, order = 1, dx=d['dt'], deriv=1, binprior=0)
        rising=d['Ewe(V)_dtSG']>=0
        d['segind']=numpy.empty(len(rising), dtype='uint16')
        endptcorr=int(1.5*SGpts)#remove possibiliies of segment breaks within 1.5SGpts of the edges
        # Force the edge regions to follow their nearest interior value so
        # smoothing artifacts at the ends cannot create spurious segments.
        rising[:endptcorr+1]=rising[endptcorr+2]
        rising[-endptcorr-1:]=rising[-endptcorr-2]
        # Segment boundaries are where the rising flag flips.
        inds=numpy.where(rising[:-1]!=rising[1:])[0]
        inds=numpy.concatenate([[0], inds, [len(rising)]])
        for count, (i0, i1) in enumerate(zip(inds[:-1], inds[1:])):
            d['segind'][i0:i1]=count
        d['segprops_dlist']=[]
        for si, i0 in zip(range(d['segind'].max()+1), inds[:-1]):
            d['segprops_dlist']+=[{}]
            d['segprops_dlist'][-1]['rising']=rising[i0+1]
            inds2=numpy.where(d['segind']==si)[0]
            d['segprops_dlist'][-1]['inds']=inds2
            d['segprops_dlist'][-1]['npts']=len(d['segprops_dlist'][-1]['inds'])
            # Mean dE/dt over the segment interior (SGpts trimmed per side).
            d['segprops_dlist'][-1]['dEdt']=d['Ewe(V)_dtSG'][inds2][SGpts:-SGpts].mean()
def manualsegind_dlist(dlist, istart=None, SGpts=8):
    """Segment each dict's data at manually supplied start indices.

    istart is a list of segment start indices (default: [0], i.e. a single
    segment spanning the whole trace).  For every dict this records
    'segind' (segment index per point) and 'segprops_dlist' (per-segment
    inds/npts/dEdt/rising), mirroring calcsegind_dlist.

    Fix: the default was the mutable literal [0]; although it was never
    mutated here, a None sentinel removes the shared-default pitfall while
    keeping the effective default identical.
    """
    if istart is None:
        istart = [0]
    if not 'dt' in dlist[0].keys():
        calcmeandt_dlist(dlist)
    for d in dlist:
        # Smoothed time derivative of the potential.
        d['Ewe(V)_dtSG'] = savgolsmooth(d['Ewe(V)'], nptsoneside=SGpts, order=1, dx=d['dt'], deriv=1, binprior=0)
        d['segprops_dlist'] = []
        d['segind'] = numpy.empty(len(d['Ewe(V)']), dtype='uint16')
        # Each segment runs from its start index to the next start (the
        # final segment runs to the end of the trace).
        iend = istart[1:]+[len(d['Ewe(V)'])]
        for count, (i0, i1) in enumerate(zip(istart, iend)):
            d['segind'][i0:i1] = count
            d['segprops_dlist'] += [{}]
            inds2 = numpy.arange(i0, i1)
            d['segprops_dlist'][-1]['inds'] = inds2
            d['segprops_dlist'][-1]['npts'] = i1-i0
            # Mean dE/dt over the segment interior (SGpts trimmed per side).
            d['segprops_dlist'][-1]['dEdt'] = d['Ewe(V)_dtSG'][inds2][SGpts:-SGpts].mean()
            d['segprops_dlist'][-1]['rising'] = d['segprops_dlist'][-1]['dEdt']>0.
def SegSG_dlist(dlist, SGpts=10, order=1, k='I(A)'):
    """Savitzky-Golay smooth column k within each segment of every dict,
    storing the result under k+'_SG' (float32, zero outside segments)."""
    smoothed_key = k + '_SG'
    for d in dlist:
        out = numpy.zeros(d[k].shape, dtype='float32')
        for segd in d['segprops_dlist']:
            seg_inds = segd['inds']
            out[seg_inds] = numpy.float32(
                savgolsmooth(d[k][seg_inds], nptsoneside=SGpts, order=order, deriv=0, binprior=0))
        d[smoothed_key] = out
def SegdtSG_dlist(dlist, SGpts=10, order=1, k='I(A)', dxk='dt'):
    """Per-segment Savitzky-Golay smoothed time derivative of column k,
    stored under k+'_dtSG' for every dict in dlist."""
    kSG=k+'_dtSG'
    for d in dlist:
        # NOTE(review): this guard skips the dict exactly when kSG is absent
        # AND the spacing key dxk is present -- i.e. when the derivative
        # still needs to be computed and can be.  It looks inverted;
        # presumably something like
        #     if kSG in d.keys() or not dxk in d.keys(): continue
        # (skip when already computed or spacing unavailable) was intended.
        # Confirm against the callers before changing.
        if not kSG in d.keys() and dxk in d.keys():
            continue
        d[kSG]=numpy.zeros(d[k].shape, dtype='float32')
        for segd in d['segprops_dlist']:
            inds=segd['inds']
            # deriv=1 with dx=d[dxk] yields d(k)/dt over this segment.
            d[kSG][inds]=numpy.float32(savgolsmooth(d[k][inds], nptsoneside=SGpts, order = order, deriv=1, binprior=0, dx=d[dxk]))
def calcIllum_LVsettings(d, lv4tuple, savekey='Illumcalc'):
    """Reconstruct the boolean illumination pattern from LabView-style
    settings (dark delay, illumination end time, duty fraction, period)
    and store it in d[savekey]."""
    dark_delay, ill_end, duty, period = lv4tuple
    times = d['t(s)']
    pattern = numpy.zeros(len(times), dtype='bool')
    # Points inside the overall illumination window.
    window = numpy.where((times > dark_delay) & (times <= ill_end))[0]
    # Time relative to the start of the window (fancy indexing copies, so
    # the subtraction does not touch d['t(s)']).
    rel_times = times[window] - times[window][0]
    cycle_frac = (rel_times % period) / period
    # Illuminated during the first `duty` fraction of each period.
    pattern[window[cycle_frac <= duty]] = 1
    d[savekey] = pattern
def calcdiff_stepill(d, ikey='Illum', ykeys=['Ewe(V)'], xkeys=['t(s)', 'I(A)'], illfracrange=(.4, .95), darkfracrange=(.4, .95)):
    """Compute illuminated-minus-dark differences for a single illumination
    step (one ill interval and one adjacent dark interval).

    ikey may be the key of a boolean-ish illumination array in d, or an
    lv4tuple (see calcIllum_LVsettings) from which the pattern is rebuilt.
    Stores k+'_ill' and k+'_dark' means for every key and, for each ykey,
    '_illdiffmean' and '_illdiff'.  Returns 0 on success, 1 if no usable
    transition is found.  NOTE: the mutable list defaults are read-only.
    """
    if isinstance(ikey, list) or isinstance(ikey, numpy.ndarray):
        # LabView-style settings: reconstruct the boolean pattern first.
        calcIllum_LVsettings(d, ikey, savekey='Illumcalc')
        illum=d['Illumcalc']
    else:
        illum=d[ikey]!=0
    # Start index and length of the central fracrange window of an interval.
    istart_len_calc=lambda startind, endind, fracrange: (startind+numpy.floor(fracrange[0]*(endind-startind)), numpy.ceil((fracrange[1]-fracrange[0])*(endind-startind)))
    # Dark->ill and ill->dark transition indices.
    riseinds=numpy.where(illum[1:]&numpy.logical_not(illum[:-1]))[0]+1
    fallinds=numpy.where(numpy.logical_not(illum[1:])&illum[:-1])[0]+1
    if len(fallinds)==0 and len(riseinds)==0:
        print 'insufficint light cycles'
        return 1
    if illum[0]:
        # Trace starts illuminated: ill interval first, dark after it.
        illstart=0
        illend=fallinds[0]
        darkstart=fallinds[0]
        if len(riseinds)==0:
            darkend=len(illum)
        else:
            darkend=riseinds[0]
    else:
        # Trace starts dark: dark interval first, ill after it.
        darkstart=0
        darkend=riseinds[0]
        illstart=riseinds[0]
        if len(fallinds)==0:
            illend=len(illum)
        else:
            illend=fallinds[0]
    ill_istart, ill_len=istart_len_calc(illstart, illend, illfracrange)
    dark_istart, dark_len=istart_len_calc(darkstart, darkend, darkfracrange)
    inds_ill=[range(int(ill_istart), int(ill_istart+ill_len))]
    inds_dark=[range(int(dark_istart), int(dark_istart+dark_len))]
    d['inds_ill']=inds_ill
    d['inds_dark']=inds_dark
    # Mean of each key over the selected ill / dark windows.
    getillvals=lambda arr:numpy.array([arr[inds].mean() for inds in inds_ill])
    getdarkvals=lambda arr:numpy.array([arr[inds].mean() for inds in inds_dark])
    for k in xkeys+ykeys:
        d[k+'_ill']=getillvals(d[k])
        d[k+'_dark']=getdarkvals(d[k])
    for k in ykeys:
        d[k+'_illdiffmean']=d[k+'_ill'][0]-d[k+'_dark'][0]
        d[k+'_illdiff']=numpy.array(d[k+'_illdiffmean'])
    return 0
def calcdiff_choppedill(d, ikey='Illum', ykeys=['I(A)'], xkeys=['t(s)', 'Ewe(V)'], illfracrange=(.4, .95), darkfracrange=(.4, .95)):
    """Compute illuminated-minus-dark differences for chopped (repeated
    on/off) illumination.

    Each ill interval is compared with the average of the dark intervals
    before and after it.  Stores per-cycle k+'_ill'/k+'_dark' means and,
    for each ykey, '_illdiff', '_illdiffmean' and '_illdiffstd'.
    Returns 0 on success, 1 if there are not enough light cycles.
    NOTE: the mutable list defaults are read-only; this is Python 2 code
    (print statements, list-returning map()).
    """
    if isinstance(ikey, list) or isinstance(ikey, numpy.ndarray):
        # LabView-style settings: reconstruct the boolean pattern first.
        calcIllum_LVsettings(d, ikey, savekey='Illumcalc')
        illum=d['Illumcalc']
    else:
        illum=d[ikey]!=0
    # Start index and length of the central fracrange window of an interval.
    istart_len_calc=lambda startind, endind, fracrange: (startind+numpy.floor(fracrange[0]*(endind-startind)), numpy.ceil((fracrange[1]-fracrange[0])*(endind-startind)))
    riseinds=numpy.where(illum[1:]&numpy.logical_not(illum[:-1]))[0]+1
    fallinds=numpy.where(numpy.logical_not(illum[1:])&illum[:-1])[0]+1
    if len(fallinds)<2 or len(riseinds)==0:
        print 'insufficint light cycles'
        return 1
    riseinds=riseinds[riseinds<fallinds[-1]]#only consider illum if there is a dark before and after
    fallinds=fallinds[fallinds>riseinds[0]]
    if len(fallinds)<2 or len(riseinds)==0:
        print 'insufficint light cycles'
        return 1
    ill_istart, ill_len=istart_len_calc(riseinds, fallinds, illfracrange)
    # First and last dark points bracket the whole chopped train.
    darkstart, darkend=numpy.where(numpy.logical_not(illum))[0][[0, -1]]
    dark_istart, dark_len=istart_len_calc(numpy.concatenate([[darkstart], fallinds]), numpy.concatenate([riseinds, [darkend]]), darkfracrange)
    #inds_ill=[range(int(i0), int(i0+ilen)) for i0, ilen in zip(ill_istart, ill_len)]
    #inds_dark=[range(int(i0), int(i0+ilen)) for i0, ilen in zip(dark_istart, dark_len)]
    # Keep only cycles where both the ill and dark windows are non-empty.
    indstemp=[(range(int(i0ill), int(i0ill+ilenill)), range(int(i0dark), int(i0dark+ilendark))) for i0ill, ilenill, i0dark, ilendark in zip(ill_istart, ill_len, dark_istart, dark_len) if ilenill>0 and ilendark>0]
    inds_ill=map(operator.itemgetter(0), indstemp)
    inds_dark=map(operator.itemgetter(1), indstemp)
    # The trailing dark interval (after the last ill) is handled separately:
    # if it is empty the last ill cycle has no following dark and is dropped.
    if dark_len[-1]>0:
        inds_dark+=[range(int(dark_istart[-1]), int(dark_istart[-1]+dark_len[-1]))]
    else:
        inds_ill=inds_ill[:-1]
    d['inds_ill']=inds_ill
    d['inds_dark']=inds_dark
    # Mean of each key over every selected ill / dark window.
    getillvals=lambda arr:numpy.array([arr[inds].mean() for inds in inds_ill])
    getdarkvals=lambda arr:numpy.array([arr[inds].mean() for inds in inds_dark])
    for k in xkeys+ykeys:
        d[k+'_ill']=getillvals(d[k])
        d[k+'_dark']=getdarkvals(d[k])
    for k in ykeys:
        # Each ill value minus the mean of its two neighboring dark values.
        d[k+'_illdiff']=d[k+'_ill']-0.5*(d[k+'_dark'][:-1]+d[k+'_dark'][1:])
        d[k+'_illdiffmean']=numpy.mean(d[k+'_illdiff'])
        d[k+'_illdiffstd']=numpy.std(d[k+'_illdiff'])
    return 0
def calcdiff_ill_caller(d, ikey='Illum', thresh=0, **kwargs):
    """Dispatch to the step- or chopped-illumination analysis depending on
    how many light transitions the illumination trace contains.

    Stores the boolean pattern in d['IllumBool'] and returns the error
    code of whichever analysis function was called.
    """
    if isinstance(ikey, (list, numpy.ndarray)):
        # LabView-style settings: rebuild the boolean pattern first.
        calcIllum_LVsettings(d, ikey, savekey='Illumcalc')
        illum = d['Illumcalc']
        ikey = 'Illumcalc'
    else:
        illum = d[ikey] > thresh
    dark = numpy.logical_not(illum)
    riseinds = numpy.where(illum[1:] & dark[:-1])[0] + 1
    fallinds = numpy.where(dark[1:] & illum[:-1])[0] + 1
    d['IllumBool'] = illum
    # A single on->off pair (or no transitions at all) is a step; anything
    # richer is treated as chopped illumination.
    single_step = (len(riseinds) == 1 and len(fallinds) == 1
                   and riseinds[0] > fallinds[0])
    if len(riseinds) == 0 or len(fallinds) == 0 or single_step:
        return calcdiff_stepill(d, ikey=ikey, **kwargs)
    return calcdiff_choppedill(d, ikey='IllumBool', **kwargs)
def illumtimeshift(d, ikey, tkey, tshift):
    """Return d[ikey] resampled by nearest-neighbor lookup at the times
    d[tkey] shifted backwards by tshift."""
    shifted_times = d[tkey] - tshift
    nearest = [int(numpy.argmin((t - shifted_times) ** 2)) for t in d[tkey]]
    return d[ikey][nearest]
|
jamesbulpin/xcp-xen-4.1 | refs/heads/master | tools/python/xen/xend/XendPSCSI.py | 43 | #============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright FUJITSU LIMITED 2008
# Masaki Kanno <kanno.masaki@jp.fujitsu.com>
#============================================================================
from xen.xend.XendBase import XendBase
from xen.xend.XendBase import XendAPIStore
from xen.xend import uuid as genuuid
class XendPSCSI(XendBase):
    """Representation of a physical SCSI device."""

    @classmethod
    def getClass(cls):
        return "PSCSI"

    @classmethod
    def getAttrRO(cls):
        # Read-only attributes exposed through the Xen API, in addition to
        # those inherited from XendBase.
        return XendBase.getAttrRO() + ['host',
                                       'physical_host',
                                       'physical_channel',
                                       'physical_target',
                                       'physical_lun',
                                       'physical_HCTL',
                                       'HBA',
                                       'vendor_name',
                                       'model',
                                       'type_id',
                                       'type',
                                       'dev_name',
                                       'sg_name',
                                       'revision',
                                       'scsi_id',
                                       'scsi_level']

    @classmethod
    def getAttrRW(cls):
        # No writable attributes beyond those of XendBase.
        return XendBase.getAttrRW() + []

    @classmethod
    def getAttrInst(cls):
        return XendBase.getAttrInst() + []

    @classmethod
    def getMethods(cls):
        return XendBase.getMethods() + []

    @classmethod
    def getFuncs(cls):
        return XendBase.getFuncs() + []

    @classmethod
    def get_by_HCTL(cls, physical_HCTL):
        """Return the uuid of the PSCSI device with the given H:C:T:L
        string, or None if no such device is registered."""
        for candidate in XendAPIStore.get_all("PSCSI"):
            if candidate.get_physical_HCTL() == physical_HCTL:
                return candidate.get_uuid()
        return None

    def __init__(self, uuid, record):
        self.physical_HCTL = record['physical_HCTL']
        self.physical_HBA = record['HBA']
        self.vendor_name = record['vendor_name']
        self.model = record['model']
        self.type_id = record['type_id']
        self.type = record['type']
        self.dev_name = record['dev_name']
        self.sg_name = record['sg_name']
        self.revision = record['revision']
        self.scsi_id = record['scsi_id']
        self.scsi_level = record['scsi_level']

        # Decompose "host:channel:target:lun" into integer components.
        hctl_parts = self.physical_HCTL.split(':')
        self.physical_host = int(hctl_parts[0])
        self.physical_channel = int(hctl_parts[1])
        self.physical_target = int(hctl_parts[2])
        self.physical_lun = int(hctl_parts[3])

        XendBase.__init__(self, uuid, record)

    def get_host(self):
        # Imported lazily to avoid a circular import with XendNode.
        from xen.xend import XendNode
        return XendNode.instance().get_uuid()

    def get_physical_host(self):
        return self.physical_host

    def get_physical_channel(self):
        return self.physical_channel

    def get_physical_target(self):
        return self.physical_target

    def get_physical_lun(self):
        return self.physical_lun

    def get_physical_HCTL(self):
        return self.physical_HCTL

    def get_HBA(self):
        return self.physical_HBA

    def get_vendor_name(self):
        return self.vendor_name

    def get_model(self):
        return self.model

    def get_type_id(self):
        return self.type_id

    def get_type(self):
        return self.type

    def get_dev_name(self):
        return self.dev_name

    def get_sg_name(self):
        return self.sg_name

    def get_revision(self):
        return self.revision

    def get_scsi_id(self):
        return self.scsi_id

    def get_scsi_level(self):
        return self.scsi_level
class XendPSCSI_HBA(XendBase):
    """Representation of a physical SCSI HBA."""

    @classmethod
    def getClass(cls):
        return "PSCSI_HBA"

    @classmethod
    def getAttrRO(cls):
        # Read-only attributes exposed through the Xen API, in addition to
        # those inherited from XendBase.
        return XendBase.getAttrRO() + ['host', 'physical_host', 'PSCSIs']

    @classmethod
    def getAttrRW(cls):
        # No writable attributes beyond those of XendBase.
        return XendBase.getAttrRW() + []

    @classmethod
    def getAttrInst(cls):
        return XendBase.getAttrInst() + []

    @classmethod
    def getMethods(cls):
        return XendBase.getMethods() + []

    @classmethod
    def getFuncs(cls):
        return XendBase.getFuncs() + []

    @classmethod
    def get_by_physical_host(cls, physical_host):
        """Return the uuid of the PSCSI_HBA for the given physical host
        number, or None if no such HBA is registered."""
        for candidate in XendAPIStore.get_all('PSCSI_HBA'):
            if candidate.get_physical_host() == physical_host:
                return candidate.get_uuid()
        return None

    def __init__(self, uuid, record):
        self.physical_host = record['physical_host']
        XendBase.__init__(self, uuid, record)

    def get_host(self):
        # Imported lazily to avoid a circular import with XendNode.
        from xen.xend import XendNode
        return XendNode.instance().get_uuid()

    def get_physical_host(self):
        return self.physical_host

    def get_PSCSIs(self):
        """Uuids of all PSCSI devices attached to this HBA."""
        my_uuid = self.get_uuid()
        return [dev.get_uuid()
                for dev in XendAPIStore.get_all('PSCSI')
                if dev.get_HBA() == my_uuid]
|
ArianeFire/HaniCam | refs/heads/master | facerec-master/py/facerec/model.py | 2 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) Philipp Wagner. All rights reserved.
# Licensed under the BSD license. See LICENSE file in the project root for full license information.
from facerec.feature import AbstractFeature
from facerec.classifier import AbstractClassifier
class PredictableModel(object):
    """Bundle a feature extractor with a classifier into a single trainable,
    predictable face-recognition model."""

    def __init__(self, feature, classifier):
        if not isinstance(feature, AbstractFeature):
            raise TypeError("feature must be of type AbstractFeature!")
        if not isinstance(classifier, AbstractClassifier):
            raise TypeError("classifier must be of type AbstractClassifier!")
        self.feature = feature
        self.classifier = classifier

    def compute(self, X, y):
        """Extract features from (X, y) and train the classifier on them."""
        extracted = self.feature.compute(X, y)
        self.classifier.compute(extracted, y)

    def predict(self, X):
        """Extract features from X and return the classifier's prediction."""
        return self.classifier.predict(self.feature.extract(X))

    def __repr__(self):
        return "PredictableModel (feature=%s, classifier=%s)" % (
            repr(self.feature), repr(self.classifier))
|
felixfontein/ansible | refs/heads/devel | test/units/cli/test_galaxy.py | 18 | # -*- coding: utf-8 -*-
# (c) 2016, Adrian Likins <alikins@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ansible
from io import BytesIO
import json
import os
import pytest
import shutil
import stat
import tarfile
import tempfile
import yaml
import ansible.constants as C
from ansible import context
from ansible.cli.galaxy import GalaxyCLI
from ansible.galaxy import collection
from ansible.galaxy.api import GalaxyAPI
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.utils import context_objects as co
from ansible.utils.display import Display
from units.compat import unittest
from units.compat.mock import patch, MagicMock
@pytest.fixture(autouse=True)
def reset_cli_args():
    """Clear the cached global CLI-args singleton around every test.

    Fix: the original passed autouse='function', a truthy string almost
    certainly confused with the fixture *scope* (whose per-test default is
    already 'function').  autouse is a boolean flag, so autouse=True states
    the intent directly; behavior is unchanged since any truthy value
    enables autouse.
    """
    co.GlobalCLIArgs._Singleton__instance = None
    yield
    co.GlobalCLIArgs._Singleton__instance = None
class TestGalaxy(unittest.TestCase):
    """Tests for the ansible-galaxy CLI: role install/remove round trip and
    per-subcommand option parsing."""
    @classmethod
    def setUpClass(cls):
        '''creating prerequisites for installing a role; setUpClass occurs ONCE whereas setUp occurs with every method tested.'''
        # class data for easy viewing: role_dir, role_tar, role_name, role_req, role_path

        cls.temp_dir = tempfile.mkdtemp(prefix='ansible-test_galaxy-')
        os.chdir(cls.temp_dir)

        if os.path.exists("./delete_me"):
            shutil.rmtree("./delete_me")

        # creating framework for a role
        gc = GalaxyCLI(args=["ansible-galaxy", "init", "--offline", "delete_me"])
        gc.run()
        cls.role_dir = "./delete_me"
        cls.role_name = "delete_me"

        # making a temp dir for role installation
        cls.role_path = os.path.join(tempfile.mkdtemp(), "roles")
        if not os.path.isdir(cls.role_path):
            os.makedirs(cls.role_path)

        # creating a tar file name for class data
        cls.role_tar = './delete_me.tar.gz'
        cls.makeTar(cls.role_tar, cls.role_dir)

        # creating a temp file with installation requirements
        cls.role_req = './delete_me_requirements.yml'
        fd = open(cls.role_req, "w")
        fd.write("- 'src': '%s'\n 'name': '%s'\n 'path': '%s'" % (cls.role_tar, cls.role_name, cls.role_path))
        fd.close()

    @classmethod
    def makeTar(cls, output_file, source_dir):
        ''' used for making a tarfile from a role directory '''
        # adding directory into a tar file
        try:
            tar = tarfile.open(output_file, "w:gz")
            tar.add(source_dir, arcname=os.path.basename(source_dir))
        except AttributeError:  # tarfile obj. has no attribute __exit__ prior to python 2. 7
            pass
        finally:  # ensuring closure of tarfile obj
            tar.close()

    @classmethod
    def tearDownClass(cls):
        '''After tests are finished removes things created in setUpClass'''
        # deleting the temp role directory
        if os.path.exists(cls.role_dir):
            shutil.rmtree(cls.role_dir)
        if os.path.exists(cls.role_req):
            os.remove(cls.role_req)
        if os.path.exists(cls.role_tar):
            os.remove(cls.role_tar)
        if os.path.isdir(cls.role_path):
            shutil.rmtree(cls.role_path)

        os.chdir('/')
        shutil.rmtree(cls.temp_dir)

    def setUp(self):
        # Reset the stored command line args
        co.GlobalCLIArgs._Singleton__instance = None
        self.default_args = ['ansible-galaxy']

    def tearDown(self):
        # Reset the stored command line args
        co.GlobalCLIArgs._Singleton__instance = None

    def test_init(self):
        galaxy_cli = GalaxyCLI(args=self.default_args)
        self.assertTrue(isinstance(galaxy_cli, GalaxyCLI))

    def test_display_min(self):
        gc = GalaxyCLI(args=self.default_args)
        role_info = {'name': 'some_role_name'}
        display_result = gc._display_role_info(role_info)
        self.assertTrue(display_result.find('some_role_name') > -1)

    def test_display_galaxy_info(self):
        gc = GalaxyCLI(args=self.default_args)
        galaxy_info = {}
        role_info = {'name': 'some_role_name',
                     'galaxy_info': galaxy_info}
        display_result = gc._display_role_info(role_info)
        if display_result.find('\n\tgalaxy_info:') == -1:
            self.fail('Expected galaxy_info to be indented once')

    def test_run(self):
        ''' verifies that the GalaxyCLI object's api is created and that execute() is called. '''
        gc = GalaxyCLI(args=["ansible-galaxy", "install", "--ignore-errors", "imaginary_role"])
        gc.parse()
        with patch.object(ansible.cli.CLI, "run", return_value=None) as mock_run:
            gc.run()
            # testing
            self.assertIsInstance(gc.galaxy, ansible.galaxy.Galaxy)
            self.assertEqual(mock_run.call_count, 1)
            self.assertTrue(isinstance(gc.api, ansible.galaxy.api.GalaxyAPI))

    def test_execute_remove(self):
        # installing role
        gc = GalaxyCLI(args=["ansible-galaxy", "install", "-p", self.role_path, "-r", self.role_req, '--force'])
        gc.run()

        # location where the role was installed
        role_file = os.path.join(self.role_path, self.role_name)

        # removing role
        # Have to reset the arguments in the context object manually since we're doing the
        # equivalent of running the command line program twice
        co.GlobalCLIArgs._Singleton__instance = None
        gc = GalaxyCLI(args=["ansible-galaxy", "remove", role_file, self.role_name])
        gc.run()

        # testing role was removed
        removed_role = not os.path.exists(role_file)
        self.assertTrue(removed_role)

    def test_exit_without_ignore_without_flag(self):
        ''' tests that GalaxyCLI exits with the error specified if the --ignore-errors flag is not used '''
        gc = GalaxyCLI(args=["ansible-galaxy", "install", "--server=None", "fake_role_name"])
        with patch.object(ansible.utils.display.Display, "display", return_value=None) as mocked_display:
            # testing that error expected is raised
            self.assertRaises(AnsibleError, gc.run)
            self.assertTrue(mocked_display.called_once_with("- downloading role 'fake_role_name', owned by "))

    def test_exit_without_ignore_with_flag(self):
        ''' tests that GalaxyCLI exits without the error specified if the --ignore-errors flag is used  '''
        # testing with --ignore-errors flag
        gc = GalaxyCLI(args=["ansible-galaxy", "install", "--server=None", "fake_role_name", "--ignore-errors"])
        with patch.object(ansible.utils.display.Display, "display", return_value=None) as mocked_display:
            gc.run()
            self.assertTrue(mocked_display.called_once_with("- downloading role 'fake_role_name', owned by "))

    def test_parse_no_action(self):
        ''' testing the options parser when no action is given '''
        gc = GalaxyCLI(args=["ansible-galaxy", ""])
        self.assertRaises(SystemExit, gc.parse)

    def test_parse_invalid_action(self):
        ''' testing the options parser when an invalid action is given '''
        gc = GalaxyCLI(args=["ansible-galaxy", "NOT_ACTION"])
        self.assertRaises(SystemExit, gc.parse)

    def test_parse_delete(self):
        ''' testing the options parser when the action 'delete' is given '''
        gc = GalaxyCLI(args=["ansible-galaxy", "delete", "foo", "bar"])
        gc.parse()
        self.assertEqual(context.CLIARGS['verbosity'], 0)

    def test_parse_import(self):
        ''' testing the options parser when the action 'import' is given '''
        gc = GalaxyCLI(args=["ansible-galaxy", "import", "foo", "bar"])
        gc.parse()
        self.assertEqual(context.CLIARGS['wait'], True)
        self.assertEqual(context.CLIARGS['reference'], None)
        self.assertEqual(context.CLIARGS['check_status'], False)
        self.assertEqual(context.CLIARGS['verbosity'], 0)

    def test_parse_info(self):
        ''' testing the options parser when the action 'info' is given '''
        gc = GalaxyCLI(args=["ansible-galaxy", "info", "foo", "bar"])
        gc.parse()
        self.assertEqual(context.CLIARGS['offline'], False)

    def test_parse_init(self):
        ''' testing the options parser when the action 'init' is given '''
        gc = GalaxyCLI(args=["ansible-galaxy", "init", "foo"])
        gc.parse()
        self.assertEqual(context.CLIARGS['offline'], False)
        self.assertEqual(context.CLIARGS['force'], False)

    def test_parse_install(self):
        ''' testing the options parser when the action 'install' is given '''
        gc = GalaxyCLI(args=["ansible-galaxy", "install"])
        gc.parse()
        self.assertEqual(context.CLIARGS['ignore_errors'], False)
        self.assertEqual(context.CLIARGS['no_deps'], False)
        self.assertEqual(context.CLIARGS['requirements'], None)
        self.assertEqual(context.CLIARGS['force'], False)

    def test_parse_list(self):
        ''' testing the options parser when the action 'list' is given '''
        gc = GalaxyCLI(args=["ansible-galaxy", "list"])
        gc.parse()
        self.assertEqual(context.CLIARGS['verbosity'], 0)

    def test_parse_remove(self):
        ''' testing the options parser when the action 'remove' is given '''
        gc = GalaxyCLI(args=["ansible-galaxy", "remove", "foo"])
        gc.parse()
        self.assertEqual(context.CLIARGS['verbosity'], 0)

    def test_parse_search(self):
        ''' testing the options parser when the action 'search' is given '''
        gc = GalaxyCLI(args=["ansible-galaxy", "search"])
        gc.parse()
        self.assertEqual(context.CLIARGS['platforms'], None)
        self.assertEqual(context.CLIARGS['galaxy_tags'], None)
        self.assertEqual(context.CLIARGS['author'], None)

    def test_parse_setup(self):
        ''' testing the options parser when the action 'setup' is given '''
        gc = GalaxyCLI(args=["ansible-galaxy", "setup", "source", "github_user", "github_repo", "secret"])
        gc.parse()
        self.assertEqual(context.CLIARGS['verbosity'], 0)
        self.assertEqual(context.CLIARGS['remove_id'], None)
        self.assertEqual(context.CLIARGS['setup_list'], False)
class ValidRoleTests(object):
    """Mixin providing shared assertions for roles scaffolded by
    'ansible-galaxy init'.  Subclasses (unittest.TestCase subclasses) call
    setUpRole() from their setUpClass to create the role under test."""

    # Subdirectories every generated role skeleton is expected to contain.
    expected_role_dirs = ('defaults', 'files', 'handlers', 'meta', 'tasks', 'templates', 'vars', 'tests')

    @classmethod
    def setUpRole(cls, role_name, galaxy_args=None, skeleton_path=None, use_explicit_type=False):
        """Create role_name in a fresh temp dir via the galaxy CLI and stash
        the paths (test_dir, role_dir, role_skeleton_path) on the class."""
        if galaxy_args is None:
            galaxy_args = []

        if skeleton_path is not None:
            cls.role_skeleton_path = skeleton_path
            galaxy_args += ['--role-skeleton', skeleton_path]

        # Make temp directory for testing
        cls.test_dir = tempfile.mkdtemp()
        if not os.path.isdir(cls.test_dir):
            os.makedirs(cls.test_dir)

        cls.role_dir = os.path.join(cls.test_dir, role_name)
        cls.role_name = role_name

        # create role using default skeleton
        args = ['ansible-galaxy']
        if use_explicit_type:
            args += ['role']
        args += ['init', '-c', '--offline'] + galaxy_args + ['--init-path', cls.test_dir, cls.role_name]

        gc = GalaxyCLI(args=args)
        gc.run()
        cls.gc = gc

        if skeleton_path is None:
            cls.role_skeleton_path = gc.galaxy.default_role_skeleton_path

    @classmethod
    def tearDownClass(cls):
        if os.path.isdir(cls.test_dir):
            shutil.rmtree(cls.test_dir)

    def test_metadata(self):
        with open(os.path.join(self.role_dir, 'meta', 'main.yml'), 'r') as mf:
            metadata = yaml.safe_load(mf)
        self.assertIn('galaxy_info', metadata, msg='unable to find galaxy_info in metadata')
        self.assertIn('dependencies', metadata, msg='unable to find dependencies in metadata')

    def test_readme(self):
        readme_path = os.path.join(self.role_dir, 'README.md')
        self.assertTrue(os.path.exists(readme_path), msg='Readme doesn\'t exist')

    def test_main_ymls(self):
        # meta/tests/files/templates either have no main.yml or are checked
        # elsewhere; the rest must contain the standard placeholder comment.
        need_main_ymls = set(self.expected_role_dirs) - set(['meta', 'tests', 'files', 'templates'])
        for d in need_main_ymls:
            main_yml = os.path.join(self.role_dir, d, 'main.yml')
            self.assertTrue(os.path.exists(main_yml))
            expected_string = "---\n# {0} file for {1}".format(d, self.role_name)
            with open(main_yml, 'r') as f:
                self.assertEqual(expected_string, f.read().strip())

    def test_role_dirs(self):
        for d in self.expected_role_dirs:
            self.assertTrue(os.path.isdir(os.path.join(self.role_dir, d)), msg="Expected role subdirectory {0} doesn't exist".format(d))

    def test_travis_yml(self):
        with open(os.path.join(self.role_dir, '.travis.yml'), 'r') as f:
            contents = f.read()
        with open(os.path.join(self.role_skeleton_path, '.travis.yml'), 'r') as f:
            expected_contents = f.read()
        self.assertEqual(expected_contents, contents, msg='.travis.yml does not match expected')

    def test_readme_contents(self):
        with open(os.path.join(self.role_dir, 'README.md'), 'r') as readme:
            contents = readme.read()
        with open(os.path.join(self.role_skeleton_path, 'README.md'), 'r') as f:
            expected_contents = f.read()
        self.assertEqual(expected_contents, contents, msg='README.md does not match expected')

    def test_test_yml(self):
        with open(os.path.join(self.role_dir, 'tests', 'test.yml'), 'r') as f:
            test_playbook = yaml.safe_load(f)
        print(test_playbook)
        self.assertEqual(len(test_playbook), 1)
        self.assertEqual(test_playbook[0]['hosts'], 'localhost')
        self.assertEqual(test_playbook[0]['remote_user'], 'root')
        self.assertListEqual(test_playbook[0]['roles'], [self.role_name], msg='The list of roles included in the test play doesn\'t match')
class TestGalaxyInitDefault(unittest.TestCase, ValidRoleTests):
    """Role init with the built-in default skeleton."""

    @classmethod
    def setUpClass(cls):
        cls.setUpRole(role_name='delete_me')

    def test_metadata_contents(self):
        """The default skeleton templates the 'your name' author placeholder into the metadata."""
        meta_path = os.path.join(self.role_dir, 'meta', 'main.yml')
        with open(meta_path, 'r') as mf:
            metadata = yaml.safe_load(mf)
        author = metadata.get('galaxy_info', dict()).get('author')
        self.assertEqual(author, 'your name', msg='author was not set properly in metadata')
class TestGalaxyInitAPB(unittest.TestCase, ValidRoleTests):
    """Role init with --type=apb."""

    @classmethod
    def setUpClass(cls):
        cls.setUpRole('delete_me_apb', galaxy_args=['--type=apb'])

    def test_metadata_apb_tag(self):
        """APB roles must carry the 'apb' tag in galaxy_tags."""
        with open(os.path.join(self.role_dir, 'meta', 'main.yml'), 'r') as mf:
            metadata = yaml.safe_load(mf)
        tags = metadata.get('galaxy_info', dict()).get('galaxy_tags', [])
        self.assertIn('apb', tags, msg='apb tag not set in role metadata')

    def test_metadata_contents(self):
        """The 'your name' author placeholder is templated into the metadata."""
        with open(os.path.join(self.role_dir, 'meta', 'main.yml'), 'r') as mf:
            metadata = yaml.safe_load(mf)
        author = metadata.get('galaxy_info', dict()).get('author')
        self.assertEqual(author, 'your name', msg='author was not set properly in metadata')

    def test_apb_yml(self):
        """An apb.yml must be created at the top level of the role."""
        apb_path = os.path.join(self.role_dir, 'apb.yml')
        self.assertTrue(os.path.exists(apb_path), msg='apb.yml was not created')

    def test_test_yml(self):
        """tests/test.yml must be a single local, fact-less play with no tasks defined."""
        with open(os.path.join(self.role_dir, 'tests', 'test.yml'), 'r') as f:
            test_playbook = yaml.safe_load(f)
        print(test_playbook)
        self.assertEqual(len(test_playbook), 1)
        play = test_playbook[0]
        self.assertEqual(play['hosts'], 'localhost')
        self.assertFalse(play['gather_facts'])
        self.assertEqual(play['connection'], 'local')
        self.assertIsNone(play['tasks'], msg='We\'re expecting an unset list of tasks in test.yml')
class TestGalaxyInitContainer(unittest.TestCase, ValidRoleTests):
    """Role init with --type=container."""

    @classmethod
    def setUpClass(cls):
        cls.setUpRole('delete_me_container', galaxy_args=['--type=container'])

    def test_metadata_container_tag(self):
        """Container roles must carry the 'container' tag in galaxy_tags."""
        with open(os.path.join(self.role_dir, 'meta', 'main.yml'), 'r') as mf:
            metadata = yaml.safe_load(mf)
        tags = metadata.get('galaxy_info', dict()).get('galaxy_tags', [])
        self.assertIn('container', tags, msg='container tag not set in role metadata')

    def test_metadata_contents(self):
        """The 'your name' author placeholder is templated into the metadata."""
        with open(os.path.join(self.role_dir, 'meta', 'main.yml'), 'r') as mf:
            metadata = yaml.safe_load(mf)
        author = metadata.get('galaxy_info', dict()).get('author')
        self.assertEqual(author, 'your name', msg='author was not set properly in metadata')

    def test_meta_container_yml(self):
        """A meta/container.yml must be created for container roles."""
        container_yml = os.path.join(self.role_dir, 'meta', 'container.yml')
        self.assertTrue(os.path.exists(container_yml), msg='container.yml was not created')

    def test_test_yml(self):
        """tests/test.yml must be a single local, fact-less play with no tasks defined."""
        with open(os.path.join(self.role_dir, 'tests', 'test.yml'), 'r') as f:
            test_playbook = yaml.safe_load(f)
        print(test_playbook)
        self.assertEqual(len(test_playbook), 1)
        play = test_playbook[0]
        self.assertEqual(play['hosts'], 'localhost')
        self.assertFalse(play['gather_facts'])
        self.assertEqual(play['connection'], 'local')
        self.assertIsNone(play['tasks'], msg='We\'re expecting an unset list of tasks in test.yml')
class TestGalaxyInitSkeleton(unittest.TestCase, ValidRoleTests):
    """Role init from the custom skeleton in test_data/role_skeleton."""

    @classmethod
    def setUpClass(cls):
        role_skeleton_path = os.path.join(os.path.split(__file__)[0], 'test_data', 'role_skeleton')
        cls.setUpRole('delete_me_skeleton', skeleton_path=role_skeleton_path, use_explicit_type=True)

    def _assert_raw_template(self, test_conf_j2):
        # Shared check: the .j2 file exists and still contains its raw Jinja markers.
        self.assertTrue(os.path.exists(test_conf_j2), msg="The test.conf.j2 template doesn't seem to exist, is it being rendered as test.conf?")
        with open(test_conf_j2, 'r') as f:
            contents = f.read()
        expected_contents = '[defaults]\ntest_key = {{ test_variable }}'
        self.assertEqual(expected_contents, contents.strip(), msg="test.conf.j2 doesn't contain what it should, is it being rendered?")

    def test_empty_files_dir(self):
        """Ignored skeleton entries must not leak into the generated files/ dir."""
        files_dir = os.path.join(self.role_dir, 'files')
        self.assertTrue(os.path.isdir(files_dir))
        self.assertListEqual(os.listdir(files_dir), [], msg='we expect the files directory to be empty, is ignore working?')

    def test_template_ignore_jinja(self):
        """Files under templates/ keep their .j2 suffix and raw Jinja contents."""
        self._assert_raw_template(os.path.join(self.role_dir, 'templates', 'test.conf.j2'))

    def test_template_ignore_jinja_subfolder(self):
        """Template passthrough also applies inside nested template folders."""
        self._assert_raw_template(os.path.join(self.role_dir, 'templates', 'subfolder', 'test.conf.j2'))

    def test_template_ignore_similar_folder(self):
        """A sibling dir merely named like templates/ is still copied normally."""
        self.assertTrue(os.path.exists(os.path.join(self.role_dir, 'templates_extra', 'templates.txt')))

    def test_skeleton_option(self):
        """The skeleton path given on the CLI must land in the parsed global args."""
        self.assertEqual(self.role_skeleton_path, context.CLIARGS['role_skeleton'], msg='Skeleton path was not parsed properly from the command line')
@pytest.mark.parametrize('cli_args, expected', [
    (['ansible-galaxy', 'collection', 'init', 'abc._def'], 0),
    (['ansible-galaxy', 'collection', 'init', 'abc._def', '-vvv'], 3),
    (['ansible-galaxy', '-vv', 'collection', 'init', 'abc._def'], 2),
    # Due to our manual parsing we want to verify that -v set in the sub parser takes precedence. This behaviour is
    # deprecated and tests should be removed when the code that handles it is removed
    (['ansible-galaxy', '-vv', 'collection', 'init', 'abc._def', '-v'], 1),
    (['ansible-galaxy', '-vv', 'collection', 'init', 'abc._def', '-vvvv'], 4),
    (['ansible-galaxy', '-vvv', 'init', 'name'], 3),
    (['ansible-galaxy', '-vvvvv', 'init', '-v', 'name'], 1),
])
def test_verbosity_arguments(cli_args, expected, monkeypatch):
    """-v flags from either the top-level or the sub-command parser must end up in CLIARGS['verbosity']."""
    # Mock out the functions so we don't actually execute anything
    for func_name in [f for f in dir(GalaxyCLI) if f.startswith("execute_")]:
        monkeypatch.setattr(GalaxyCLI, func_name, MagicMock())

    cli = GalaxyCLI(args=cli_args)
    cli.run()

    assert context.CLIARGS['verbosity'] == expected
@pytest.fixture()
def collection_skeleton(request, tmp_path_factory):
    """Run ``ansible-galaxy collection init`` for the parametrized (name, skeleton_path) pair.

    Returns the path of the generated collection directory
    (<tmp>/<namespace>/<collection>).
    """
    name, skeleton_path = request.param

    galaxy_args = ['ansible-galaxy', 'collection', 'init', '-c']

    if skeleton_path is not None:
        galaxy_args += ['--collection-skeleton', skeleton_path]

    # Non-ASCII temp dir name deliberately exercises unicode path handling.
    test_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections'))
    galaxy_args += ['--init-path', test_dir, name]

    GalaxyCLI(args=galaxy_args).run()
    namespace_name, collection_name = name.split('.', 1)
    collection_dir = os.path.join(test_dir, namespace_name, collection_name)

    return collection_dir
@pytest.mark.parametrize('collection_skeleton', [
    ('ansible_test.my_collection', None),
], indirect=True)
def test_collection_default(collection_skeleton):
    """A default-skeleton collection gets placeholder metadata and the standard directory layout."""
    meta_path = os.path.join(collection_skeleton, 'galaxy.yml')

    with open(meta_path, 'r') as galaxy_meta:
        metadata = yaml.safe_load(galaxy_meta)

    # Every templated galaxy.yml key and the placeholder value it should hold.
    expected_metadata = {
        'namespace': 'ansible_test',
        'name': 'my_collection',
        'authors': ['your name <example@domain.com>'],
        'readme': 'README.md',
        'version': '1.0.0',
        'description': 'your collection description',
        'license': ['GPL-2.0-or-later'],
        'tags': [],
        'dependencies': {},
        'documentation': 'http://docs.example.com',
        'repository': 'http://example.com/repository',
        'homepage': 'http://example.com',
        'issues': 'http://example.com/issue/tracker',
    }
    for key, expected_value in expected_metadata.items():
        assert metadata[key] == expected_value

    for d in ['docs', 'plugins', 'roles']:
        assert os.path.isdir(os.path.join(collection_skeleton, d)), \
            "Expected collection subdirectory {0} doesn't exist".format(d)
@pytest.mark.parametrize('collection_skeleton', [
    ('ansible_test.delete_me_skeleton', os.path.join(os.path.split(__file__)[0], 'test_data', 'collection_skeleton')),
], indirect=True)
def test_collection_skeleton(collection_skeleton):
    """Init from a custom skeleton: galaxy.yml is templated, empty dirs are kept,
    and files that are not *.j2 (or live under a templates/ dir) are copied raw."""
    meta_path = os.path.join(collection_skeleton, 'galaxy.yml')

    with open(meta_path, 'r') as galaxy_meta:
        metadata = yaml.safe_load(galaxy_meta)

    assert metadata['namespace'] == 'ansible_test'
    assert metadata['name'] == 'delete_me_skeleton'
    assert metadata['authors'] == ['Ansible Cow <acow@bovineuniversity.edu>', 'Tu Cow <tucow@bovineuniversity.edu>']
    assert metadata['version'] == '0.1.0'
    assert metadata['readme'] == 'README.md'
    # The custom skeleton's galaxy.yml defines exactly the five keys asserted above.
    assert len(metadata) == 5

    assert os.path.exists(os.path.join(collection_skeleton, 'README.md'))

    # Test empty directories exist and are empty
    for empty_dir in ['plugins/action', 'plugins/filter', 'plugins/inventory', 'plugins/lookup',
                      'plugins/module_utils', 'plugins/modules']:

        assert os.listdir(os.path.join(collection_skeleton, empty_dir)) == []

    # Test files that don't end with .j2 were not templated
    doc_file = os.path.join(collection_skeleton, 'docs', 'My Collection.md')
    with open(doc_file, 'r') as f:
        doc_contents = f.read()
    assert doc_contents.strip() == 'Welcome to my test collection doc for {{ namespace }}.'

    # Test files that end with .j2 but are in the templates directory were not templated
    for template_dir in ['playbooks/templates', 'playbooks/templates/subfolder',
                         'roles/common/templates', 'roles/common/templates/subfolder']:
        test_conf_j2 = os.path.join(collection_skeleton, template_dir, 'test.conf.j2')
        assert os.path.exists(test_conf_j2)

        with open(test_conf_j2, 'r') as f:
            contents = f.read()
        expected_contents = '[defaults]\ntest_key = {{ test_variable }}'

        assert expected_contents == contents.strip()
@pytest.fixture()
def collection_artifact(collection_skeleton, tmp_path_factory):
    ''' Creates a collection artifact tarball that is ready to be published and installed.

    Yields the output directory containing the built .tar.gz.
    '''
    output_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Output'))

    # Create a file with +x in the collection so we can test the permissions
    execute_path = os.path.join(collection_skeleton, 'runme.sh')
    with open(execute_path, mode='wb') as fd:
        fd.write(b"echo hi")

    # S_ISUID should not be present on extraction.
    os.chmod(execute_path, os.stat(execute_path).st_mode | stat.S_ISUID | stat.S_IEXEC)

    # Because we call GalaxyCLI in collection_skeleton we need to reset the singleton back to None so it uses the new
    # args, we reset the original args once it is done.
    orig_cli_args = co.GlobalCLIArgs._Singleton__instance
    try:
        co.GlobalCLIArgs._Singleton__instance = None
        galaxy_args = ['ansible-galaxy', 'collection', 'build', collection_skeleton, '--output-path', output_dir]
        gc = GalaxyCLI(args=galaxy_args)
        gc.run()

        yield output_dir
    finally:
        # Always restore the singleton, even if the build or the test body failed.
        co.GlobalCLIArgs._Singleton__instance = orig_cli_args
def test_invalid_skeleton_path():
    """collection init must abort when --collection-skeleton points at a missing path."""
    cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'init', 'my.collection', '--collection-skeleton',
                          '/fake/path'])
    expected = "- the skeleton path '/fake/path' does not exist, cannot init collection"
    with pytest.raises(AnsibleError, match=expected):
        cli.run()
@pytest.mark.parametrize("name", [
    "",
    "invalid",
    "hypen-ns.collection",
    "ns.hyphen-collection",
    "ns.collection.weird",
])
def test_invalid_collection_name_init(name):
    """Names not shaped like <namespace>.<collection> must be rejected by collection init."""
    cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'init', name])
    expected = "Invalid collection name '%s', name must be in the format <namespace>.<collection>" % name
    with pytest.raises(AnsibleError, match=expected):
        cli.run()
@pytest.mark.parametrize("name, expected", [
    ("", ""),
    ("invalid", "invalid"),
    ("invalid:1.0.0", "invalid"),
    ("hypen-ns.collection", "hypen-ns.collection"),
    ("ns.hyphen-collection", "ns.hyphen-collection"),
    ("ns.collection.weird", "ns.collection.weird"),
])
def test_invalid_collection_name_install(name, expected, tmp_path_factory):
    """Installing a non-FQCN name must fail with the dep-resolver's format error."""
    install_path = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections'))

    # FIXME: we should add the collection name in the error message
    # Used to be: expected = "Invalid collection name '%s', name must be in the format <namespace>.<collection>" % expected
    expected = "Neither the collection requirement entry key 'name', nor 'source' point to a concrete resolvable collection artifact. "
    expected += r"Also 'name' is not an FQCN\. A valid collection name must be in the format <namespace>\.<collection>\. "
    expected += r"Please make sure that the namespace and the collection name contain characters from \[a\-zA\-Z0\-9_\] only\."

    gc = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', name, '-p', os.path.join(install_path, 'install')])
    with pytest.raises(AnsibleError, match=expected):
        gc.run()
@pytest.mark.parametrize('collection_skeleton', [
    ('ansible_test.build_collection', None),
], indirect=True)
def test_collection_build(collection_artifact):
    """Build a default collection and verify the tarball layout, ownership, file modes,
    MANIFEST.json and FILES.json contents."""
    tar_path = os.path.join(collection_artifact, 'ansible_test-build_collection-1.0.0.tar.gz')
    assert tarfile.is_tarfile(tar_path)

    with tarfile.open(tar_path, mode='r') as tar:
        tar_members = tar.getmembers()

        valid_files = ['MANIFEST.json', 'FILES.json', 'roles', 'docs', 'plugins', 'plugins/README.md', 'README.md',
                       'runme.sh']
        assert len(tar_members) == len(valid_files)

        # Verify the uid and gid is 0 and the correct perms are set
        for member in tar_members:
            assert member.name in valid_files

            assert member.gid == 0
            assert member.gname == ''
            assert member.uid == 0
            assert member.uname == ''
            # runme.sh was given +x by the collection_artifact fixture; dirs get 0755, plain files 0644.
            if member.isdir() or member.name == 'runme.sh':
                assert member.mode == 0o0755
            else:
                assert member.mode == 0o0644

        # NOTE(review): relies on MANIFEST.json being tar member [0] and FILES.json member [1]
        # — matches valid_files ordering above; TODO confirm the builder guarantees this.
        manifest_file = tar.extractfile(tar_members[0])
        try:
            manifest = json.loads(to_text(manifest_file.read()))
        finally:
            manifest_file.close()

        coll_info = manifest['collection_info']
        file_manifest = manifest['file_manifest_file']
        assert manifest['format'] == 1
        assert len(manifest.keys()) == 3

        assert coll_info['namespace'] == 'ansible_test'
        assert coll_info['name'] == 'build_collection'
        assert coll_info['version'] == '1.0.0'
        assert coll_info['authors'] == ['your name <example@domain.com>']
        assert coll_info['readme'] == 'README.md'
        assert coll_info['tags'] == []
        assert coll_info['description'] == 'your collection description'
        assert coll_info['license'] == ['GPL-2.0-or-later']
        assert coll_info['license_file'] is None
        assert coll_info['dependencies'] == {}
        assert coll_info['repository'] == 'http://example.com/repository'
        assert coll_info['documentation'] == 'http://docs.example.com'
        assert coll_info['homepage'] == 'http://example.com'
        assert coll_info['issues'] == 'http://example.com/issue/tracker'
        assert len(coll_info.keys()) == 14

        assert file_manifest['name'] == 'FILES.json'
        assert file_manifest['ftype'] == 'file'
        assert file_manifest['chksum_type'] == 'sha256'
        assert file_manifest['chksum_sha256'] is not None  # Order of keys makes it hard to verify the checksum
        assert file_manifest['format'] == 1
        assert len(file_manifest.keys()) == 5

        files_file = tar.extractfile(tar_members[1])
        try:
            files = json.loads(to_text(files_file.read()))
        finally:
            files_file.close()

        assert len(files['files']) == 7
        assert files['format'] == 1
        assert len(files.keys()) == 2

        valid_files_entries = ['.', 'roles', 'docs', 'plugins', 'plugins/README.md', 'README.md', 'runme.sh']
        for file_entry in files['files']:
            assert file_entry['name'] in valid_files_entries
            assert file_entry['format'] == 1

            if file_entry['name'] in ['plugins/README.md', 'runme.sh']:
                assert file_entry['ftype'] == 'file'
                assert file_entry['chksum_type'] == 'sha256'
                # Can't test the actual checksum as the html link changes based on the version or the file contents
                # don't matter
                assert file_entry['chksum_sha256'] is not None
            elif file_entry['name'] == 'README.md':
                assert file_entry['ftype'] == 'file'
                assert file_entry['chksum_type'] == 'sha256'
                assert file_entry['chksum_sha256'] == '6d8b5f9b5d53d346a8cd7638a0ec26e75e8d9773d952162779a49d25da6ef4f5'
            else:
                assert file_entry['ftype'] == 'dir'
                assert file_entry['chksum_type'] is None
                assert file_entry['chksum_sha256'] is None

            assert len(file_entry.keys()) == 5
@pytest.fixture()
def collection_install(reset_cli_args, tmp_path_factory, monkeypatch):
    """Patch out install_collections and Display.warning; yield (install mock, warning mock, output dir)."""
    mock_install = MagicMock()
    mock_warning = MagicMock()
    monkeypatch.setattr(ansible.cli.galaxy, 'install_collections', mock_install)
    monkeypatch.setattr(ansible.utils.display.Display, 'warning', mock_warning)

    output_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Output'))
    yield mock_install, mock_warning, output_dir
def test_collection_install_with_names(collection_install):
    """Install by name: warns about the non-configured path and forwards parsed requirements
    plus the default Galaxy API to install_collections."""
    mock_install, mock_warning, output_dir = collection_install

    galaxy_args = ['ansible-galaxy', 'collection', 'install', 'namespace.collection', 'namespace2.collection:1.0.1',
                   '--collections-path', output_dir]
    GalaxyCLI(args=galaxy_args).run()

    # Install target is always <collections-path>/ansible_collections.
    collection_path = os.path.join(output_dir, 'ansible_collections')
    assert os.path.isdir(collection_path)

    assert mock_warning.call_count == 1
    assert "The specified collections path '%s' is not part of the configured Ansible collections path" % output_dir \
        in mock_warning.call_args[0][0]

    assert mock_install.call_count == 1
    requirements = [('%s.%s' % (r.namespace, r.name), r.ver, r.src, r.type,) for r in mock_install.call_args[0][0]]
    assert requirements == [('namespace.collection', '*', None, 'galaxy'),
                            ('namespace2.collection', '1.0.1', None, 'galaxy')]
    assert mock_install.call_args[0][1] == collection_path
    assert len(mock_install.call_args[0][2]) == 1
    assert mock_install.call_args[0][2][0].api_server == 'https://galaxy.ansible.com'
    assert mock_install.call_args[0][2][0].validate_certs is True
    assert mock_install.call_args[0][3] is False  # ignore_errors
    assert mock_install.call_args[0][4] is False  # no_deps
    assert mock_install.call_args[0][5] is False  # force
    assert mock_install.call_args[0][6] is False  # force_deps
def test_collection_install_with_requirements_file(collection_install):
    """Install from a requirements.yml: entries (bare names and name/version dicts)
    are parsed and forwarded to install_collections."""
    mock_install, mock_warning, output_dir = collection_install

    requirements_file = os.path.join(output_dir, 'requirements.yml')
    with open(requirements_file, 'wb') as req_obj:
        req_obj.write(b'''---
collections:
- namespace.coll
- name: namespace2.coll
  version: '>2.0.1'
''')

    galaxy_args = ['ansible-galaxy', 'collection', 'install', '--requirements-file', requirements_file,
                   '--collections-path', output_dir]
    GalaxyCLI(args=galaxy_args).run()

    collection_path = os.path.join(output_dir, 'ansible_collections')
    assert os.path.isdir(collection_path)

    assert mock_warning.call_count == 1
    assert "The specified collections path '%s' is not part of the configured Ansible collections path" % output_dir \
        in mock_warning.call_args[0][0]

    assert mock_install.call_count == 1
    requirements = [('%s.%s' % (r.namespace, r.name), r.ver, r.src, r.type,) for r in mock_install.call_args[0][0]]
    assert requirements == [('namespace.coll', '*', None, 'galaxy'),
                            ('namespace2.coll', '>2.0.1', None, 'galaxy')]
    assert mock_install.call_args[0][1] == collection_path
    assert mock_install.call_args[0][2][0].api_server == 'https://galaxy.ansible.com'
    assert mock_install.call_args[0][2][0].validate_certs is True
    assert mock_install.call_args[0][3] is False  # ignore_errors
    assert mock_install.call_args[0][4] is False  # no_deps
    assert mock_install.call_args[0][5] is False  # force
    assert mock_install.call_args[0][6] is False  # force_deps
def test_collection_install_with_relative_path(collection_install, monkeypatch):
    """Relative --requirements-file and --collections-path values are resolved to absolute paths."""
    mock_install = collection_install[0]

    mock_req = MagicMock()
    mock_req.return_value = {'collections': [('namespace.coll', '*', None, None)], 'roles': []}
    monkeypatch.setattr(ansible.cli.galaxy.GalaxyCLI, '_parse_requirements_file', mock_req)

    monkeypatch.setattr(os, 'makedirs', MagicMock())

    # NOTE(review): '.myl' looks like a typo for '.yml', but parsing is mocked out
    # so only the path value matters here.
    requirements_file = './requirements.myl'
    collections_path = './ansible_collections'
    galaxy_args = ['ansible-galaxy', 'collection', 'install', '--requirements-file', requirements_file,
                   '--collections-path', collections_path]
    GalaxyCLI(args=galaxy_args).run()

    assert mock_install.call_count == 1
    assert mock_install.call_args[0][0] == [('namespace.coll', '*', None, None)]
    assert mock_install.call_args[0][1] == os.path.abspath(collections_path)
    assert len(mock_install.call_args[0][2]) == 1
    assert mock_install.call_args[0][2][0].api_server == 'https://galaxy.ansible.com'
    assert mock_install.call_args[0][2][0].validate_certs is True
    assert mock_install.call_args[0][3] is False  # ignore_errors
    assert mock_install.call_args[0][4] is False  # no_deps
    assert mock_install.call_args[0][5] is False  # force
    assert mock_install.call_args[0][6] is False  # force_deps

    assert mock_req.call_count == 1
    assert mock_req.call_args[0][0] == os.path.abspath(requirements_file)
def test_collection_install_with_unexpanded_path(collection_install, monkeypatch):
    """'~' and env vars in --requirements-file / --collections-path are expanded before use."""
    mock_install = collection_install[0]

    mock_req = MagicMock()
    mock_req.return_value = {'collections': [('namespace.coll', '*', None, None)], 'roles': []}
    monkeypatch.setattr(ansible.cli.galaxy.GalaxyCLI, '_parse_requirements_file', mock_req)

    monkeypatch.setattr(os, 'makedirs', MagicMock())

    requirements_file = '~/requirements.myl'
    collections_path = '~/ansible_collections'
    galaxy_args = ['ansible-galaxy', 'collection', 'install', '--requirements-file', requirements_file,
                   '--collections-path', collections_path]
    GalaxyCLI(args=galaxy_args).run()

    assert mock_install.call_count == 1
    assert mock_install.call_args[0][0] == [('namespace.coll', '*', None, None)]
    assert mock_install.call_args[0][1] == os.path.expanduser(os.path.expandvars(collections_path))
    assert len(mock_install.call_args[0][2]) == 1
    assert mock_install.call_args[0][2][0].api_server == 'https://galaxy.ansible.com'
    assert mock_install.call_args[0][2][0].validate_certs is True
    assert mock_install.call_args[0][3] is False  # ignore_errors
    assert mock_install.call_args[0][4] is False  # no_deps
    assert mock_install.call_args[0][5] is False  # force
    assert mock_install.call_args[0][6] is False  # force_deps

    assert mock_req.call_count == 1
    assert mock_req.call_args[0][0] == os.path.expanduser(os.path.expandvars(requirements_file))
def test_collection_install_in_collection_dir(collection_install, monkeypatch):
    """Installing into a configured collections path must not emit the path warning."""
    mock_install, mock_warning, output_dir = collection_install

    collections_path = C.COLLECTIONS_PATHS[0]

    galaxy_args = ['ansible-galaxy', 'collection', 'install', 'namespace.collection', 'namespace2.collection:1.0.1',
                   '--collections-path', collections_path]
    GalaxyCLI(args=galaxy_args).run()

    # No warning because the target is part of the configured collections path.
    assert mock_warning.call_count == 0

    assert mock_install.call_count == 1
    requirements = [('%s.%s' % (r.namespace, r.name), r.ver, r.src, r.type,) for r in mock_install.call_args[0][0]]
    assert requirements == [('namespace.collection', '*', None, 'galaxy'),
                            ('namespace2.collection', '1.0.1', None, 'galaxy')]
    assert mock_install.call_args[0][1] == os.path.join(collections_path, 'ansible_collections')
    assert len(mock_install.call_args[0][2]) == 1
    assert mock_install.call_args[0][2][0].api_server == 'https://galaxy.ansible.com'
    assert mock_install.call_args[0][2][0].validate_certs is True
    assert mock_install.call_args[0][3] is False  # ignore_errors
    assert mock_install.call_args[0][4] is False  # no_deps
    assert mock_install.call_args[0][5] is False  # force
    assert mock_install.call_args[0][6] is False  # force_deps
def test_collection_install_with_url(monkeypatch, collection_install):
    """Installing from a URL resolves the tarball metadata and records the URL as the source."""
    mock_install, dummy, output_dir = collection_install

    # Stub out the download and the tar metadata probe so no network/tar access happens.
    mock_open = MagicMock(return_value=BytesIO())
    monkeypatch.setattr(collection.concrete_artifact_manager, 'open_url', mock_open)

    mock_metadata = MagicMock(return_value={'namespace': 'foo', 'name': 'bar', 'version': 'v1.0.0'})
    monkeypatch.setattr(collection.concrete_artifact_manager, '_get_meta_from_tar', mock_metadata)

    galaxy_args = ['ansible-galaxy', 'collection', 'install', 'https://foo/bar/foo-bar-v1.0.0.tar.gz',
                   '--collections-path', output_dir]
    GalaxyCLI(args=galaxy_args).run()

    collection_path = os.path.join(output_dir, 'ansible_collections')
    assert os.path.isdir(collection_path)

    assert mock_install.call_count == 1
    requirements = [('%s.%s' % (r.namespace, r.name), r.ver, r.src, r.type,) for r in mock_install.call_args[0][0]]
    assert requirements == [('foo.bar', 'v1.0.0', 'https://foo/bar/foo-bar-v1.0.0.tar.gz', 'url')]
    assert mock_install.call_args[0][1] == collection_path
    assert len(mock_install.call_args[0][2]) == 1
    assert mock_install.call_args[0][2][0].api_server == 'https://galaxy.ansible.com'
    assert mock_install.call_args[0][2][0].validate_certs is True
    assert mock_install.call_args[0][3] is False  # ignore_errors
    assert mock_install.call_args[0][4] is False  # no_deps
    assert mock_install.call_args[0][5] is False  # force
    assert mock_install.call_args[0][6] is False  # force_deps
def test_collection_install_name_and_requirements_fail(collection_install):
    """Passing both a positional collection name and --requirements-file is an error."""
    test_path = collection_install[2]
    cli_args = ['ansible-galaxy', 'collection', 'install', 'namespace.collection', '--collections-path',
                test_path, '--requirements-file', test_path]
    expected = 'The positional collection_name arg and --requirements-file are mutually exclusive.'
    with pytest.raises(AnsibleError, match=expected):
        GalaxyCLI(args=cli_args).run()
def test_collection_install_no_name_and_requirements_fail(collection_install):
    """Omitting both the collection name and --requirements-file is an error."""
    test_path = collection_install[2]
    cli_args = ['ansible-galaxy', 'collection', 'install', '--collections-path', test_path]
    expected = 'You must specify a collection name or a requirements file.'
    with pytest.raises(AnsibleError, match=expected):
        GalaxyCLI(args=cli_args).run()
def test_collection_install_path_with_ansible_collections(collection_install):
    """A path already ending in ansible_collections is used as-is (no extra nesting),
    but the non-configured-path warning is still emitted."""
    mock_install, mock_warning, output_dir = collection_install

    collection_path = os.path.join(output_dir, 'ansible_collections')

    galaxy_args = ['ansible-galaxy', 'collection', 'install', 'namespace.collection', 'namespace2.collection:1.0.1',
                   '--collections-path', collection_path]
    GalaxyCLI(args=galaxy_args).run()

    assert os.path.isdir(collection_path)

    assert mock_warning.call_count == 1
    assert "The specified collections path '%s' is not part of the configured Ansible collections path" \
        % collection_path in mock_warning.call_args[0][0]

    assert mock_install.call_count == 1
    requirements = [('%s.%s' % (r.namespace, r.name), r.ver, r.src, r.type,) for r in mock_install.call_args[0][0]]
    assert requirements == [('namespace.collection', '*', None, 'galaxy'),
                            ('namespace2.collection', '1.0.1', None, 'galaxy')]
    assert mock_install.call_args[0][1] == collection_path
    assert len(mock_install.call_args[0][2]) == 1
    assert mock_install.call_args[0][2][0].api_server == 'https://galaxy.ansible.com'
    assert mock_install.call_args[0][2][0].validate_certs is True
    assert mock_install.call_args[0][3] is False  # ignore_errors
    assert mock_install.call_args[0][4] is False  # no_deps
    assert mock_install.call_args[0][5] is False  # force
    assert mock_install.call_args[0][6] is False  # force_deps
def test_collection_install_ignore_certs(collection_install):
    """--ignore-certs must disable certificate validation on the Galaxy API.

    The original assertion only checked positional arg [3] (ignore_errors, per the
    mock_install arg-order comments elsewhere in this file), which --ignore-certs never
    touches — so the option's actual effect was untested.
    """
    mock_install, mock_warning, output_dir = collection_install

    galaxy_args = ['ansible-galaxy', 'collection', 'install', 'namespace.collection', '--collections-path', output_dir,
                   '--ignore-certs']
    GalaxyCLI(args=galaxy_args).run()

    # mock_install args: collections, output_path, apis, ignore_errors, no_deps, force, force_deps
    # --ignore-certs must not be confused with --ignore-errors:
    assert mock_install.call_args[0][3] is False  # ignore_errors stays off
    # The real effect: the Galaxy API handed to install_collections skips TLS verification.
    assert mock_install.call_args[0][2][0].validate_certs is False
def test_collection_install_force(collection_install):
    """--force is forwarded as the force flag to install_collections."""
    mock_install, mock_warning, output_dir = collection_install

    cli_args = ['ansible-galaxy', 'collection', 'install', 'namespace.collection', '--collections-path', output_dir,
                '--force']
    GalaxyCLI(args=cli_args).run()

    # mock_install args: collections, output_path, apis, ignore_errors, no_deps, force, force_deps
    assert mock_install.call_args[0][5] is True
def test_collection_install_force_deps(collection_install):
    """--force-with-deps is forwarded as the force_deps flag to install_collections."""
    mock_install, mock_warning, output_dir = collection_install

    cli_args = ['ansible-galaxy', 'collection', 'install', 'namespace.collection', '--collections-path', output_dir,
                '--force-with-deps']
    GalaxyCLI(args=cli_args).run()

    # mock_install args: collections, output_path, apis, ignore_errors, no_deps, force, force_deps
    assert mock_install.call_args[0][6] is True
def test_collection_install_no_deps(collection_install):
    """--no-deps is forwarded as the no_deps flag to install_collections."""
    mock_install, mock_warning, output_dir = collection_install

    cli_args = ['ansible-galaxy', 'collection', 'install', 'namespace.collection', '--collections-path', output_dir,
                '--no-deps']
    GalaxyCLI(args=cli_args).run()

    # mock_install args: collections, output_path, apis, ignore_errors, no_deps, force, force_deps
    assert mock_install.call_args[0][4] is True
def test_collection_install_ignore(collection_install):
    """--ignore-errors is forwarded as the ignore_errors flag to install_collections."""
    mock_install, mock_warning, output_dir = collection_install

    cli_args = ['ansible-galaxy', 'collection', 'install', 'namespace.collection', '--collections-path', output_dir,
                '--ignore-errors']
    GalaxyCLI(args=cli_args).run()

    # mock_install args: collections, output_path, apis, ignore_errors, no_deps, force, force_deps
    assert mock_install.call_args[0][3] is True
def test_collection_install_custom_server(collection_install):
    """--server overrides the default Galaxy API endpoint while keeping cert validation on."""
    mock_install, mock_warning, output_dir = collection_install

    cli_args = ['ansible-galaxy', 'collection', 'install', 'namespace.collection', '--collections-path', output_dir,
                '--server', 'https://galaxy-dev.ansible.com']
    GalaxyCLI(args=cli_args).run()

    api_list = mock_install.call_args[0][2]
    assert len(api_list) == 1
    assert api_list[0].api_server == 'https://galaxy-dev.ansible.com'
    assert api_list[0].validate_certs is True
@pytest.fixture()
def requirements_file(request, tmp_path_factory):
    """Write the parametrized content (if any) to a requirements.yml and yield its path.

    Parametrized with ``None``/empty, the file is never created so missing-file
    handling can be tested.
    """
    body = request.param

    scratch_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Requirements'))
    req_path = os.path.join(scratch_dir, 'requirements.yml')

    if body:
        with open(req_path, 'wb') as fd:
            fd.write(to_bytes(body))

    yield req_path
@pytest.fixture()
def requirements_cli(monkeypatch):
    """A GalaxyCLI for 'install' whose execute step is stubbed, ready for _parse_requirements_file calls."""
    monkeypatch.setattr(GalaxyCLI, 'execute_install', MagicMock())
    galaxy_cli = GalaxyCLI(args=['ansible-galaxy', 'install'])
    galaxy_cli.run()
    return galaxy_cli
@pytest.mark.parametrize('requirements_file', [None], indirect=True)
def test_parse_requirements_file_that_doesnt_exist(requirements_cli, requirements_file):
    """Parsing a missing requirements file must raise a clear AnsibleError."""
    expected_err = "The requirements file '%s' does not exist." % to_native(requirements_file)
    with pytest.raises(AnsibleError, match=expected_err):
        requirements_cli._parse_requirements_file(requirements_file)
@pytest.mark.parametrize('requirements_file', ['not a valid yml file: hi: world'], indirect=True)
def test_parse_requirements_file_that_isnt_yaml(requirements_cli, requirements_file):
    """Parsing a file that is not valid YAML must raise a parse error naming the file."""
    expected_err = "Failed to parse the requirements yml at '%s' with the following error" % to_native(requirements_file)
    with pytest.raises(AnsibleError, match=expected_err):
        requirements_cli._parse_requirements_file(requirements_file)
@pytest.mark.parametrize('requirements_file', [('''
# Older role based requirements.yml
- galaxy.role
- anotherrole
''')], indirect=True)
def test_parse_requirements_in_older_format_illega(requirements_cli, requirements_file):
    """The legacy list-of-roles format is rejected when allow_old_format=False.

    NOTE(review): the function name looks like a typo for '...illegal'; renaming
    would change the pytest node id, so it is left as-is.
    """
    expected = "Expecting requirements file to be a dict with the key 'collections' that contains a list of " \
               "collections to install"

    with pytest.raises(AnsibleError, match=expected):
        requirements_cli._parse_requirements_file(requirements_file, allow_old_format=False)
@pytest.mark.parametrize('requirements_file', ['''
collections:
- version: 1.0.0
'''], indirect=True)
def test_parse_requirements_without_mandatory_name_key(requirements_cli, requirements_file):
    """A collection entry with neither 'name' nor 'source' must fail with the resolver's format error."""
    # Used to be "Collections requirement entry should contain the key name."
    # Should we check that either source or name is provided before using the dep resolver?
    expected = "Neither the collection requirement entry key 'name', nor 'source' point to a concrete resolvable collection artifact. "
    expected += r"Also 'name' is not an FQCN\. A valid collection name must be in the format <namespace>\.<collection>\. "
    expected += r"Please make sure that the namespace and the collection name contain characters from \[a\-zA\-Z0\-9_\] only\."

    with pytest.raises(AnsibleError, match=expected):
        requirements_cli._parse_requirements_file(requirements_file)
@pytest.mark.parametrize('requirements_file', [('''
collections:
- namespace.collection1
- namespace.collection2
'''), ('''
collections:
- name: namespace.collection1
- name: namespace.collection2
''')], indirect=True)
def test_parse_requirements(requirements_cli, requirements_file):
    """Short (string) and long (dict) collection entries parse identically."""
    parsed = requirements_cli._parse_requirements_file(requirements_file)
    parsed['collections'] = [
        ('%s.%s' % (req.namespace, req.name), req.ver, req.src, req.type,)
        for req in parsed.get('collections', [])
    ]
    assert parsed == {
        'roles': [],
        'collections': [('namespace.collection1', '*', None, 'galaxy'),
                        ('namespace.collection2', '*', None, 'galaxy')],
    }
@pytest.mark.parametrize('requirements_file', ['''
collections:
- name: namespace.collection1
  version: ">=1.0.0,<=2.0.0"
  source: https://galaxy-dev.ansible.com
- namespace.collection2'''], indirect=True)
def test_parse_requirements_with_extra_info(requirements_cli, requirements_file):
    """Entries may carry version/source; unspecified fields fall back to defaults."""
    parsed = requirements_cli._parse_requirements_file(requirements_file)
    parsed['collections'] = [
        ('%s.%s' % (req.namespace, req.name), req.ver, req.src, req.type,)
        for req in parsed.get('collections', [])
    ]

    assert not parsed['roles']
    assert len(parsed['collections']) == 2

    first, second = parsed['collections']
    assert first[0] == 'namespace.collection1'
    assert first[1] == '>=1.0.0,<=2.0.0'
    assert first[2].api_server == 'https://galaxy-dev.ansible.com'
    assert second == ('namespace.collection2', '*', None, 'galaxy')
@pytest.mark.parametrize('requirements_file', ['''
roles:
- username.role_name
- src: username2.role_name2
- src: ssh://github.com/user/repo
  scm: git

collections:
- namespace.collection2
'''], indirect=True)
def test_parse_requirements_with_roles_and_collections(requirements_cli, requirements_file):
    """Role entries and collection entries in one file are parsed side by side."""
    parsed = requirements_cli._parse_requirements_file(requirements_file)
    parsed['collections'] = [
        ('%s.%s' % (req.namespace, req.name), req.ver, req.src, req.type,)
        for req in parsed.get('collections', [])
    ]

    roles = parsed['roles']
    assert len(roles) == 3
    assert [role.name for role in roles] == \
        ['username.role_name', 'username2.role_name2', 'repo']
    assert roles[2].src == 'ssh://github.com/user/repo'

    assert parsed['collections'] == [('namespace.collection2', '*', None, 'galaxy')]
@pytest.mark.parametrize('requirements_file', ['''
collections:
- name: namespace.collection
- name: namespace2.collection2
  source: https://galaxy-dev.ansible.com/
- name: namespace3.collection3
  source: server
'''], indirect=True)
def test_parse_requirements_with_collection_source(requirements_cli, requirements_file):
    """A 'source' may be a URL or the name of a configured Galaxy server."""
    config_server = GalaxyAPI(requirements_cli.api, 'server', 'https://config-server')
    requirements_cli.api_servers.append(config_server)

    parsed = requirements_cli._parse_requirements_file(requirements_file)
    parsed['collections'] = [
        ('%s.%s' % (req.namespace, req.name), req.ver, req.src, req.type,)
        for req in parsed.get('collections', [])
    ]

    assert parsed['roles'] == []
    assert len(parsed['collections']) == 3

    first, second, third = parsed['collections']
    assert first == ('namespace.collection', '*', None, 'galaxy')
    assert (second[0], second[1]) == ('namespace2.collection2', '*')
    assert second[2].api_server == 'https://galaxy-dev.ansible.com/'
    assert (third[0], third[1]) == ('namespace3.collection3', '*')
    assert third[2].api_server == 'https://config-server'
@pytest.mark.parametrize('requirements_file', ['''
- username.included_role
- src: https://github.com/user/repo
'''], indirect=True)
def test_parse_requirements_roles_with_include(requirements_cli, requirements_file):
    """An 'include' entry pulls role requirements in from another file."""
    parent_requirements = os.path.join(os.path.dirname(requirements_file), 'parent.yaml')
    parent_reqs = [
        'ansible.role',
        {'include': requirements_file},
    ]
    with open(to_bytes(parent_requirements), 'wb') as parent_fd:
        parent_fd.write(to_bytes(yaml.safe_dump(parent_reqs)))

    parsed = requirements_cli._parse_requirements_file(parent_requirements)
    roles = parsed['roles']

    assert parsed['collections'] == []
    assert [role.name for role in roles] == \
        ['ansible.role', 'username.included_role', 'repo']
    assert roles[2].src == 'https://github.com/user/repo'
@pytest.mark.parametrize('requirements_file', ['''
- username.role
- include: missing.yml
'''], indirect=True)
def test_parse_requirements_roles_with_include_missing(requirements_cli, requirements_file):
    """A missing include file raises an AnsibleError naming both files."""
    expected_msg = "Failed to find include requirements file 'missing.yml' in '%s'" \
        % to_native(requirements_file)
    with pytest.raises(AnsibleError, match=expected_msg):
        requirements_cli._parse_requirements_file(requirements_file)
@pytest.mark.parametrize('requirements_file', ['''
collections:
- namespace.name
roles:
- namespace.name
'''], indirect=True)
def test_install_implicit_role_with_collections(requirements_file, monkeypatch):
    """A bare 'install' with a mixed requirements file installs both the
    collections and the roles, and emits no 'ignored' message."""
    mock_collection_install = MagicMock()
    monkeypatch.setattr(GalaxyCLI, '_execute_install_collection', mock_collection_install)
    mock_role_install = MagicMock()
    monkeypatch.setattr(GalaxyCLI, '_execute_install_role', mock_role_install)
    mock_display = MagicMock()
    monkeypatch.setattr(Display, 'display', mock_display)

    cli = GalaxyCLI(args=['ansible-galaxy', 'install', '-r', requirements_file])
    cli.run()

    assert mock_collection_install.call_count == 1
    requirements = [('%s.%s' % (r.namespace, r.name), r.ver, r.src, r.type,)
                    for r in mock_collection_install.call_args[0][0]]
    assert requirements == [('namespace.name', '*', None, 'galaxy')]
    assert mock_collection_install.call_args[0][1] == cli._get_default_collection_path()

    assert mock_role_install.call_count == 1
    assert len(mock_role_install.call_args[0][0]) == 1
    assert str(mock_role_install.call_args[0][0][0]) == 'namespace.name'

    # any() replaces the manual found-flag/break loop over mock_calls.
    assert not any('contains collections which will be ignored' in call[1][0]
                   for call in mock_display.mock_calls)
@pytest.mark.parametrize('requirements_file', ['''
collections:
- namespace.name
roles:
- namespace.name
'''], indirect=True)
def test_install_explicit_role_with_collections(requirements_file, monkeypatch):
    """'role install' with a mixed requirements file installs only the roles
    and mentions (at -vvv) that the collections are ignored."""
    mock_collection_install = MagicMock()
    monkeypatch.setattr(GalaxyCLI, '_execute_install_collection', mock_collection_install)
    mock_role_install = MagicMock()
    monkeypatch.setattr(GalaxyCLI, '_execute_install_role', mock_role_install)
    mock_display = MagicMock()
    monkeypatch.setattr(Display, 'vvv', mock_display)

    cli = GalaxyCLI(args=['ansible-galaxy', 'role', 'install', '-r', requirements_file])
    cli.run()

    assert mock_collection_install.call_count == 0
    assert mock_role_install.call_count == 1
    assert len(mock_role_install.call_args[0][0]) == 1
    assert str(mock_role_install.call_args[0][0][0]) == 'namespace.name'

    # any() replaces the manual found-flag/break loop over mock_calls.
    assert any('contains collections which will be ignored' in call[1][0]
               for call in mock_display.mock_calls)
@pytest.mark.parametrize('requirements_file', ['''
collections:
- namespace.name
roles:
- namespace.name
'''], indirect=True)
def test_install_role_with_collections_and_path(requirements_file, monkeypatch):
    """'install -p <path>' implies a role install; collections are skipped
    with a warning."""
    mock_collection_install = MagicMock()
    monkeypatch.setattr(GalaxyCLI, '_execute_install_collection', mock_collection_install)
    mock_role_install = MagicMock()
    monkeypatch.setattr(GalaxyCLI, '_execute_install_role', mock_role_install)
    mock_display = MagicMock()
    monkeypatch.setattr(Display, 'warning', mock_display)

    cli = GalaxyCLI(args=['ansible-galaxy', 'install', '-p', 'path', '-r', requirements_file])
    cli.run()

    assert mock_collection_install.call_count == 0
    assert mock_role_install.call_count == 1
    assert len(mock_role_install.call_args[0][0]) == 1
    assert str(mock_role_install.call_args[0][0][0]) == 'namespace.name'

    # any() replaces the manual found-flag/break loop over mock_calls.
    assert any('contains collections which will be ignored' in call[1][0]
               for call in mock_display.mock_calls)
@pytest.mark.parametrize('requirements_file', ['''
collections:
- namespace.name
roles:
- namespace.name
'''], indirect=True)
def test_install_collection_with_roles(requirements_file, monkeypatch):
    """'collection install' with a mixed requirements file installs only the
    collections and mentions (at -vvv) that the roles are ignored."""
    mock_collection_install = MagicMock()
    monkeypatch.setattr(GalaxyCLI, '_execute_install_collection', mock_collection_install)
    mock_role_install = MagicMock()
    monkeypatch.setattr(GalaxyCLI, '_execute_install_role', mock_role_install)
    mock_display = MagicMock()
    monkeypatch.setattr(Display, 'vvv', mock_display)

    cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', '-r', requirements_file])
    cli.run()

    assert mock_collection_install.call_count == 1
    requirements = [('%s.%s' % (r.namespace, r.name), r.ver, r.src, r.type,)
                    for r in mock_collection_install.call_args[0][0]]
    assert requirements == [('namespace.name', '*', None, 'galaxy')]

    assert mock_role_install.call_count == 0

    # any() replaces the manual found-flag/break loop over mock_calls.
    assert any('contains roles which will be ignored' in call[1][0]
               for call in mock_display.mock_calls)
|
sarvex/django | refs/heads/master | django/contrib/gis/geos/__init__.py | 164 | """
The GeoDjango GEOS module. Please consult the GeoDjango documentation
for more details: https://docs.djangoproject.com/en/dev/ref/contrib/gis/geos/
"""
from .collections import ( # NOQA
GeometryCollection, MultiLineString, MultiPoint, MultiPolygon,
)
from .error import GEOSException # NOQA
from .factory import fromfile, fromstr # NOQA
from .geometry import GEOSGeometry, hex_regex, wkt_regex # NOQA
from .io import WKBReader, WKBWriter, WKTReader, WKTWriter # NOQA
from .libgeos import geos_version, geos_version_info # NOQA
from .linestring import LinearRing, LineString # NOQA
from .point import Point # NOQA
from .polygon import Polygon # NOQA
def _version_at_least(version, minimum):
    """Return True if dotted *version* >= *minimum*, comparing numerically.

    The previous string comparison ordered versions lexicographically, so
    e.g. '3.10.0' compared as *older* than '3.3.0'. Components are parsed
    as leading digits, so a suffix such as '0dev' is tolerated; parsing
    stops at the first component with no leading digits.
    """
    def as_tuple(ver):
        parts = []
        for piece in ver.split('.'):
            digits = ''
            for char in piece:
                if not char.isdigit():
                    break
                digits += char
            if not digits:
                break
            parts.append(int(digits))
        return tuple(parts)
    return as_tuple(version) >= as_tuple(minimum)


try:
    HAS_GEOS = _version_at_least(geos_version_info()['version'], '3.3.0')
except ImportError:
    HAS_GEOS = False
|
scroggo/skia | refs/heads/master | tools/skp/page_sets/skia_twitter_desktop.py | 9 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=W0401,W0614
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class SkiaBuildbotDesktopPage(page_module.Page):
  """A desktop telemetry page replayed against the Skia twitter archive."""

  def __init__(self, url, page_set):
    super(SkiaBuildbotDesktopPage, self).__init__(
        url=url,
        page_set=page_set,
        credentials_path='data/credentials.json')
    self.user_agent_type = 'desktop'
    self.archive_data_file = 'data/skia_twitter_desktop.json'

  def RunNavigateSteps(self, action_runner):
    """Navigate to the page, then wait a fixed 5s for it to settle."""
    action_runner.NavigateToPage(self)
    action_runner.Wait(5)
class SkiaTwitterDesktopPageSet(page_set_module.PageSet):
  """ Pages designed to represent the median, not highly optimized web """

  def __init__(self):
    super(SkiaTwitterDesktopPageSet, self).__init__(
        user_agent_type='desktop',
        archive_data_file='data/skia_twitter_desktop.json')

    # Why: #8 (Alexa global), picked an interesting page
    for page_url in ('http://twitter.com/katyperry',):
      self.AddUserStory(SkiaBuildbotDesktopPage(page_url, self))
|
JesseLivezey/plankton | refs/heads/plankton | pylearn2/sandbox/cuda_convnet/weight_acts.py | 49 | """
A theano / pylearn2 wrapper for cuda-convnet's convFilterActs function.
"""
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2013, Universite de Montreal"
__credits__ = ["Ian Goodfellow and David Warde-Farley"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
"""
This module may contain code copied directly or modified from cuda-convnet.
The copyright and licensing notice for this code is reproduced below:
/*
* Copyright (c) 2011, Alex Krizhevsky (akrizhevsky@gmail.com)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
"""
from theano.misc.strutil import render_string
from theano.sandbox.cuda import CudaNdarrayType
from theano.gof import Apply
from pylearn2.sandbox.cuda_convnet.base_acts import BaseActs
from pylearn2.sandbox.cuda_convnet.base_acts import UnimplementedError
class WeightActs(BaseActs):
    """
    Transforms the gradient on the output of FilterActs into the gradient
    on FilterActs' weights.

    This is intended to be a very low-level, performance-oriented op.

    It will not try to fix the input for you. That would slow it down.
    The input must be in the right format. If not, it raises an exception.

    Currently, this op must be inserted manually, not by optimizations.

    Note that the word "input" below refers to the input to FilterActs.

    * images: (input channels, rows, cols, batch_size)
      Input channels must be divisible by 4.
    * hid_grads: (output channels, rows, cols, batch_size)
      Output channels must be a multiple of 16.
    * filters: (input channels, filter rows, filter cols, output channels)
      Filter rows must be the same as filter cols.

    Notes
    -----
    All of these convolution routines are optimized for the case
    when the number of images (i.e. the minibatch size) is a multiple
    of 128. Other batch sizes will work, but Alex "made no attempt
    whatsoever to make them work fast."
    """

    # __eq__ and __hash__ are defined in BaseActs.
    # If you add an __init__ method that adds new members to WeightActs,
    # you may need to implement a new version of __eq__ and __hash__
    # in WeightActs, that considers these parameters.

    def make_node(self, images, hid_grads, output_shape):
        """
        Build the Apply node for the weight-gradient computation.

        Parameters
        ----------
        images : CudaNdarray variable, 4D
            FilterActs' input, laid out as
            (input channels, rows, cols, batch_size).
        hid_grads : CudaNdarray variable, 4D
            Gradient on FilterActs' output, laid out as
            (output channels, rows, cols, batch_size).
        output_shape : tensor vector
            (filter rows, filter cols); validated at runtime by the
            generated C code (must be length 2, integer dtype).

        Returns
        -------
        Apply
            Node with two outputs: the weight gradients
            (input channels, filter rows, filter cols, output channels)
            and a 5D scratch tensor holding partial sums.
        """
        if not isinstance(images.type, CudaNdarrayType):
            raise TypeError("WeightActs: expected images.type "
                            "to be CudaNdarrayType, "
                            "got " + str(images.type))

        if not isinstance(hid_grads.type, CudaNdarrayType):
            raise TypeError("WeightActs: expected hid_acts.type "
                            "to be CudaNdarrayType, "
                            "got " + str(hid_grads.type))

        assert images.ndim == 4
        assert hid_grads.ndim == 4

        input_channels_broadcastable = images.type.broadcastable[0]
        # We don't know anything about filter_rows or filter_cols at compile
        # time, so we assume they're not broadcastable.
        filter_rows_broadcastable = False
        filter_cols_broadcastable = False
        output_channels_broadcastable = hid_grads.type.broadcastable[0]

        weights_grads_type = CudaNdarrayType(
            (input_channels_broadcastable,
             filter_rows_broadcastable,
             filter_cols_broadcastable,
             output_channels_broadcastable))
        # Partial sums are a 5D scratch output; nothing is known about its
        # broadcastability.
        partial_sums_type = CudaNdarrayType(
            (False,) * 5
        )

        weights_grads = weights_grads_type()
        partial_sums = partial_sums_type()

        return Apply(self, [images, hid_grads, output_shape],
                     [weights_grads, partial_sums])

    def flops(self, inputs, outputs):
        """ Useful with the hack in profilemode to print the MFlops"""
        images, kerns, output_shape = inputs
        out, partial = outputs
        # The partial sum is just a way to specify how to compute
        # stuff inside the op. It doesn't change the number of flops.
        assert images[3] == kerns[3]
        # nb mul and add by output pixel
        flops = kerns[1] * kerns[2] * 2
        # nb flops by output image
        flops *= out[1] * out[2]
        # for all outputs images#n_stack==self.imshp[0]
        flops *= images[3] * kerns[0] * images[0]
        return flops

    def c_headers(self):
        """Return the C headers required by the generated code."""
        # For some reason, the function called in the C code (_weightActs)
        # is not defined in cudaconv2.cuh, so I defined it in weight_acts.cuh
        headers = super(WeightActs, self).c_headers()
        headers.append('weight_acts.cuh')
        return headers

    def c_code(self, node, name, inputs, outputs, sub):
        """
        Generate the C/CUDA implementation (theano Op.c_code protocol).

        The code is assembled from string sections (setup, argument checks,
        kernel invocation) and rendered with render_string, which fills in
        the %(...)s placeholders from the local namespace.
        """
        partial_sum = self.partial_sum if self.partial_sum is not None else 0
        images, hid_grads, output_shape = inputs
        weights_grads, partialsum_storage = outputs
        fail = sub['fail']
        pad = self.pad

        # convFilterActs will multiply targets by scaleTargets
        # then add scaleOutput * (the convolution value)
        # We could make use of this to implement an inplace
        # addconv op but for this op we just want to compute
        # the convolution so we set them to 0 and 1 respectively
        # Note: there is another version of convFilterActs that
        # does not take these arguments, but it is just a wrapper
        # around the version that does take them, so we save
        # a function call by using the version that we use.
        basic_setup = """
        #define scaleTargets 0
        #define scaleOutput 1
        """

        if self.dense_connectivity:
            basic_setup += """
            #define numGroups 1
            """

        basic_setup += """
        #define paddingStart (-%(pad)d)
        const int *hid_grads_dims = CudaNdarray_HOST_DIMS(%(hid_grads)s);
        const int hidGradsSizeY = hid_grads_dims[1];
        const int hidGradsSizeX = hid_grads_dims[2];
        const int numModules = hidGradsSizeX * hidGradsSizeY;
        int partialSum = %(partial_sum)d > 0 ? %(partial_sum)d : numModules;
        // using this expression instead of numModules %% partialSum
        // because nvcc+msvc9 yield a strange behaviour when using %%
        if ( numModules - (numModules / partialSum) * partialSum != 0) {
            PyErr_Format(PyExc_ValueError,
                         "partialSum must divide numModules, but partialSum=%%d and "
                         "numModules=%%d", partialSum, numModules);
            %(fail)s;
        }
        """

        basic_setup += """
        #define moduleStride %d
        """ % self.stride

        if self.copy_non_contiguous:
            raise UnimplementedError()
        else:
            basic_setup += "#define WEIGHTACTS_COPY_NON_CONTIGUOUS 0\n"

        # The amount of braces that must be closed at the end
        num_braces = 0

        # Convert images int nv_images, an NVMatrix, for compatibility
        # with the cuda-convnet functions
        setup_nv_images = self._argument_contiguity_check("images") + """
        if (%(images)s->nd != 4)
        {
            PyErr_Format(PyExc_ValueError,
                         "images must have nd=4, got nd=%%i", %(images)s->nd);
            %(fail)s;
        }

        { //setup_nv_images brace 1
        const int * images_dims = CudaNdarray_HOST_DIMS(%(images)s);
        const int img_channels = images_dims[0];
        if (img_channels > 3 && img_channels %% 4 != 0)
        {
            PyErr_Format(PyExc_ValueError,
                         "images must have 3 or fewer channels, or have a multiple of 4 channels, got %%i",
                         img_channels);
            %(fail)s;
        }

        { //setup_nv_images brace 2
        const int * hid_grads_dims = CudaNdarray_HOST_DIMS(%(hid_grads)s);
        const int imgSizeY = images_dims[1];
        const int imgSizeX = images_dims[2];
        const int batch_size = images_dims[3];
        NVMatrix nv_images(%(images)s, img_channels * imgSizeY * imgSizeX, batch_size, "weight_acts: nv_images");
        """
        num_braces += 2

        # Convert hid_grads int nv_hid_grads, an NVMatrix, for compatibility
        # with the cuda-convnet functions
        setup_nv_hid_grads = self._argument_contiguity_check("hid_grads") + """
        if (%(hid_grads)s->nd != 4)
        {
            PyErr_Format(PyExc_ValueError,
                         "hid_grads must have nd=4, got nd=%%i", %(hid_grads)s->nd);
            %(fail)s;
        }

        { //setup_nv_hid_grads brace 1
        const int numFilters = hid_grads_dims[0];
        const int batch_size = hid_grads_dims[3];
        NVMatrix nv_hid_grads(%(hid_grads)s, numFilters * hidGradsSizeY *
                              hidGradsSizeX, batch_size, "weight_acts:nv_hid_grads");
        """
        num_braces += 1

        # Validate output_shape, allocate the weight-gradient output and the
        # (optional) partial-sum scratch storage.
        setup_nv_weights_grads = """
        int filters_dims[4];
        // filters: (input channels, filter rows, filter cols, output channels)
        npy_intp *shape_dims = PyArray_DIMS(%(output_shape)s);
        npy_intp target_rows, target_cols;
        PyArrayObject *casted_shape;
        PyArray_Descr *intp_dtype;
        if (PyArray_NDIM(%(output_shape)s) != 1) {
            PyErr_Format(PyExc_ValueError,
                         "output shape must be a vector, got %%d-tensor",
                         PyArray_NDIM(%(output_shape)s));
            %(fail)s;
        }
        else if (shape_dims[0] != 2)
        {
            PyErr_Format(PyExc_ValueError,
                         "output shape must be length 2, got %%d",
                         (int)shape_dims[0]);
            %(fail)s;
        }
        else if ((PyArray_DESCR(%(output_shape)s))->kind != 'i' &&
                 (PyArray_DESCR(%(output_shape)s))->kind != 'u')
        {
            PyErr_SetString(PyExc_TypeError,
                            "output shape must have integer or uint dtype");
            %(fail)s;
        }
        intp_dtype = PyArray_DescrFromType(NPY_INTP);
        casted_shape = (PyArrayObject *)PyArray_CastToType(%(output_shape)s,
                                                           intp_dtype, 0);
        target_rows = *((npy_intp *)PyArray_GETPTR1(casted_shape, 0));
        target_cols = *((npy_intp *)PyArray_GETPTR1(casted_shape, 1));
        filters_dims[0] = img_channels;
        filters_dims[1] = target_rows;
        filters_dims[2] = target_cols;
        if (filters_dims[1] != filters_dims[2])
        {
            PyErr_Format(PyExc_ValueError,
                         "filter must be square, but have shape (%%d, %%d).",
                         filters_dims[1], filters_dims[2]);
            %(fail)s;
        }
        else if (moduleStride > filters_dims[1]) {
            PyErr_Format(PyExc_ValueError,
                         "stride %%d greater than filter size (%%d, %%d)",
                         moduleStride, filters_dims[1], filters_dims[2]);
            %(fail)s;
        }
        filters_dims[3] = numFilters;
        const int filterSize = filters_dims[1];
        int partialsum_storage_dims[5];
        for (int i = 1; i < 5; i++)
        {
            partialsum_storage_dims[i] = filters_dims[i - 1];
        }
        partialsum_storage_dims[0] = numModules / partialSum;
        if (partialSum != numModules &&
            CudaNdarray_prep_output(&%(partialsum_storage)s, 5,
                                    partialsum_storage_dims))
        {
            %(fail)s;
        }
        for (int i = 0; i < 4; i++)
        {
            if (filters_dims[i] <= 0)
            {
                printf("filters_dims[%%d] = %%d\\n", i, filters_dims[i]);
                assert(false);
            }
        }
        if (CudaNdarray_prep_output(& %(weights_grads)s, 4, filters_dims))
        {
            %(fail)s;
        }

        { // setup_nv_weights_grad brace # 1
        NVMatrix nv_weights_grads(%(weights_grads)s, filters_dims[0] * filterSize
                                  * filterSize, numFilters,
                                  "weight_acts:nv_weights_grads");
        """
        num_braces += 1

        # note: imgSizeX is not specified here, it is computed internally
        # (in _filterActsSparse) by the lines:
        # int imgPixels = images.getNumRows() / numImgColors;
        # int imgSizeX = imgPixels / imgSizeY;
        #
        # note: numFilters is not specified here. it is determined by
        # nv_filters.getNumCols()
        #
        # note: the size of the filters is determined by dividing
        # nv_filters.getNumRows() by numFilterColors
        #
        run_kernel = """
        if (partialSum == numModules)
            _weightActs(nv_images, nv_hid_grads, nv_weights_grads,
                        imgSizeY, hidGradsSizeY, hidGradsSizeX, filterSize,
                        paddingStart, moduleStride, img_channels, numGroups,
                        partialSum, 0, 1);
        else {
            NVMatrix nv_partialsum(%(partialsum_storage)s, (numModules / partialSum) *
                                   filters_dims[0] * filterSize * filterSize, numFilters,
                                   "weight_acts: nv_partialsum");
            _weightActs(nv_images, nv_hid_grads, nv_partialsum,
                        imgSizeY, hidGradsSizeY, hidGradsSizeX, filterSize,
                        paddingStart, moduleStride, img_channels, numGroups,
                        partialSum, 0, 1);
            nv_partialsum.reshape((numModules / partialSum), filters_dims[0] * filterSize * filterSize * numFilters);

            // sum out axis 0 of nv_partialsum
            #define AXIS 0
            // scale the contents of nv_weights_grads by 0
            // i.e., clear out its pre-existing content
            #define SCALE_THIS 0
            // scale the new sum by 1, i.e., don't do any scaling
            #define SCALE_SUM 1
            nv_weights_grads.addSum(nv_partialsum, AXIS, SCALE_THIS, SCALE_SUM);
        }
        """

        # Close every brace opened by the setup sections above.
        braces = '}' * num_braces

        rval = (basic_setup +
                setup_nv_images +
                setup_nv_hid_grads +
                setup_nv_weights_grads +
                run_kernel +
                braces)

        rval = render_string(rval, locals())

        return rval

    def c_code_cache_version(self):
        """Version tag for theano's compiled-code cache; bump whenever the
        generated C code changes."""
        return (7,)
|
tareqalayan/ansible | refs/heads/devel | lib/ansible/modules/cloud/heroku/heroku_collaborator.py | 74 | #!/usr/bin/python
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: heroku_collaborator
short_description: "Add or delete app collaborators on Heroku"
version_added: "2.6"
description:
- Manages collaborators for Heroku apps.
- If set to C(present) and heroku user is already collaborator, then do nothing.
- If set to C(present) and heroku user is not collaborator, then add user to app.
- If set to C(absent) and heroku user is collaborator, then delete user from app.
author:
- Marcel Arns (@marns93)
requirements:
- heroku3
options:
api_key:
description:
- Heroku API key
apps:
description:
- List of Heroku App names
required: true
suppress_invitation:
description:
- Suppress email invitation when creating collaborator
type: bool
default: "no"
user:
description:
- User ID or e-mail
required: true
state:
description:
- Create or remove the heroku collaborator
choices: ["present", "absent"]
default: "present"
notes:
  - C(HEROKU_API_KEY) and C(TF_VAR_HEROKU_API_KEY) environment variables can be used instead of setting C(api_key).
- If you use I(--check), you can also pass the I(-v) flag to see affected apps in C(msg), e.g. ["heroku-example-app"].
'''
EXAMPLES = '''
- heroku_collaborator:
api_key: YOUR_API_KEY
user: max.mustermann@example.com
apps: heroku-example-app
state: present
- heroku_collaborator:
api_key: YOUR_API_KEY
user: '{{ item.user }}'
apps: '{{ item.apps | default(apps) }}'
suppress_invitation: '{{ item.suppress_invitation | default(suppress_invitation) }}'
state: '{{ item.state | default("present") }}'
with_items:
- { user: 'a.b@example.com' }
- { state: 'absent', user: 'b.c@example.com', suppress_invitation: false }
- { user: 'x.y@example.com', apps: ["heroku-example-app"] }
'''
RETURN = ''' # '''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.heroku import HerokuHelper
def add_or_delete_heroku_collaborator(module, client):
    """Ensure the requested collaborator state on every listed app.

    Returns a ``(changed, affected_apps)`` tuple: ``changed`` is True when at
    least one app was modified (or would be, in check mode), and
    ``affected_apps`` lists the names of those apps.
    """
    user = module.params['user']
    state = module.params['state']
    affected_apps = []
    result_state = False

    # Fetch the app mapping once: client.apps() may round-trip to the Heroku
    # API, so calling it on every loop iteration (as before) is wasteful.
    apps = client.apps()

    for app in module.params['apps']:
        if app not in apps:
            module.fail_json(msg='App {0} does not exist'.format(app))

        heroku_app = apps[app]

        collaborator_emails = [collaborator.user.email
                               for collaborator in heroku_app.collaborators()]

        if state == 'absent' and user in collaborator_emails:
            if not module.check_mode:
                heroku_app.remove_collaborator(user)
            affected_apps.append(app)
            result_state = True
        elif state == 'present' and user not in collaborator_emails:
            if not module.check_mode:
                heroku_app.add_collaborator(user_id_or_email=user, silent=module.params['suppress_invitation'])
            affected_apps.append(app)
            result_state = True

    return result_state, affected_apps
def main():
    """Module entry point: build the argument spec, run, report the result."""
    spec = HerokuHelper.heroku_argument_spec()
    spec.update(
        user=dict(required=True, type='str'),
        apps=dict(required=True, type='list'),
        suppress_invitation=dict(default=False, type='bool'),
        state=dict(default='present', type='str', choices=['present', 'absent']),
    )
    module = AnsibleModule(
        argument_spec=spec,
        supports_check_mode=True
    )

    client = HerokuHelper(module).get_heroku_client()

    changed, affected_apps = add_or_delete_heroku_collaborator(module, client)
    module.exit_json(changed=changed, msg=affected_apps)


if __name__ == '__main__':
    main()
|
iniju/ankidroid-triage | refs/heads/master | mapreduce/control.py | 1 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API for controlling MapReduce execution outside of MapReduce framework."""
__all__ = ["start_map"]
# pylint: disable-msg=C6409
from mapreduce import base_handler
from mapreduce import handlers
from mapreduce import model
_DEFAULT_SHARD_COUNT = 8
def start_map(name,
              handler_spec,
              reader_spec,
              mapper_parameters,
              shard_count=_DEFAULT_SHARD_COUNT,
              output_writer_spec=None,
              mapreduce_parameters=None,
              base_path=None,
              queue_name=None,
              eta=None,
              countdown=None,
              hooks_class_name=None,
              _app=None,
              transactional=False):
  """Start a new, mapper-only mapreduce.

  Args:
    name: mapreduce name. Used only for display purposes.
    handler_spec: fully qualified name of mapper handler function/class to call.
    reader_spec: fully qualified name of mapper reader to use
    mapper_parameters: dictionary of parameters to pass to mapper. These are
      mapper-specific and also used for reader initialization.
    shard_count: number of shards to create. A falsy value (0/None) falls
      back to the module default.
    output_writer_spec: fully qualified name of the output writer to use,
      or None for no output writer.
    mapreduce_parameters: dictionary of mapreduce parameters relevant to the
      whole job.
    base_path: base path of mapreduce library handler specified in app.yaml.
      "/mapreduce" by default.
    queue_name: executor queue name to be used for mapreduce tasks. If
      unspecified it will be the "default" queue or inherit the queue of
      the currently running request.
    eta: Absolute time when the MR should execute. May not be specified
      if 'countdown' is also supplied. This may be timezone-aware or
      timezone-naive.
    countdown: Time in seconds into the future that this MR should execute.
      Defaults to zero.
    hooks_class_name: fully qualified name of a hooks.Hooks subclass.
    _app: (internal) forwarded verbatim to the start handler; presumably an
      app id override — confirm against handlers.StartJobHandler.
    transactional: Specifies if job should be started as a part of already
      opened transaction.

  Returns:
    mapreduce id as string.
  """
  # Treat 0/None as "use the default shard count".
  if not shard_count:
    shard_count = _DEFAULT_SHARD_COUNT
  if base_path is None:
    base_path = base_handler._DEFAULT_BASE_PATH
  mapper_spec = model.MapperSpec(handler_spec,
                                 reader_spec,
                                 mapper_parameters,
                                 shard_count,
                                 output_writer_spec=output_writer_spec)

  return handlers.StartJobHandler._start_map(
      name,
      mapper_spec,
      mapreduce_parameters or {},
      base_path=base_path,
      queue_name=queue_name,
      eta=eta,
      countdown=countdown,
      hooks_class_name=hooks_class_name,
      _app=_app,
      transactional=transactional)
|
revanthkolli/osf.io | refs/heads/develop | scripts/consistency/ensure_backrefs.py | 55 | """
Add missing backrefs where needed.
"""
import time
from website.app import init_app
from website import models
from modularodm.storedobject import ensure_backrefs
import logging
logging.basicConfig(level=logging.DEBUG)
app = init_app()
def clean_backrefs_files():
    """Ensure the 'node' and 'uploader' backrefs exist on every NodeFile."""
    for node_file in models.NodeFile.find():
        ensure_backrefs(node_file, ['node', 'uploader'])
def clean_backrefs_logs():
    """Ensure the 'user' and 'api_key' backrefs exist on every NodeLog."""
    for log in models.NodeLog.find():
        ensure_backrefs(log, ['user', 'api_key'])
if __name__ == '__main__':
t0 = time.time()
clean_backrefs_files()
clean_backrefs_logs()
logging.debug('Spent {}'.format(time.time() - t0)) |
sitian/wing | refs/heads/master | services/whub/dev/monitor.py | 1 | # monitor.py
#
# Copyright (C) 2013 Yi-Wei Ci <ciyiwei@hotmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
from util import mon
from device import WHubDev
class Monitor(WHubDev):
    """WHub device wrapping the `mon` monitor service.

    Each operation delegates to `util.mon` and reports success by
    returning True; failures surface as exceptions raised by `mon`.
    """
    def register(self, name):
        """Create a monitor entry named *name*."""
        mon.create(name)
        return True
    def unregister(self, name):
        """Destroy the monitor entry named *name*."""
        mon.destroy(name)
        return True
    def mount(self, name):
        """Start the monitor named *name*."""
        mon.start(name)
        return True
    def unmount(self, name):
        """Stop the monitor named *name*."""
        mon.stop(name)
        return True
|
gersolar/factopy | refs/heads/master | factopy/tests/units/adapters.py | 1 | # -*- coding: utf-8 -*-
from factopy.models import Stream, Adapt
from django.test import TestCase
class TestAdapters(TestCase):
    """Unit tests for the abstract Adapt model."""
    fixtures = ['initial_data.yaml', '*']

    def setUp(self):
        stream = Stream()
        stream.save()
        adapter, _created = Adapt.objects.get_or_create(name='abstract one')
        adapter.streams.add(stream)
        self.stream = stream
        self.adapter = adapter

    def test_update(self):
        # The abstract base forces subclasses to implement update().
        with self.assertRaisesRegexp(Exception, u'Subclass responsability'):
            self.adapter.update()

    def test_should_adapt(self):
        # An abstract adapter never volunteers to adapt anything.
        self.assertFalse(self.adapter.should_adapt())

    def test_step(self):
        # While should_adapt() is False, step() is a no-op returning None.
        self.assertEquals(self.adapter.step(), None)
        # Once should_adapt() reports True, step() calls update(), which
        # the abstract base leaves unimplemented and therefore raises.
        self.adapter.should_adapt = lambda: True
        with self.assertRaisesRegexp(Exception, u'Subclass responsability'):
            self.adapter.step()
|
tbachman/group-based-policy | refs/heads/master | gbpservice/neutron/services/grouppolicy/common/constants.py | 2 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Policy-rule action types.
GP_ACTION_ALLOW = 'allow'
GP_ACTION_REDIRECT = 'redirect'
# Traffic directions a rule can apply to.
GP_DIRECTION_IN = 'in'
GP_DIRECTION_OUT = 'out'
GP_DIRECTION_BI = 'bi'
# Dictionary keys of a network service parameter.
GP_NETWORK_SVC_PARAM_TYPE = 'type'
GP_NETWORK_SVC_PARAM_NAME = 'name'
GP_NETWORK_SVC_PARAM_VALUE = 'value'
# Supported network service parameter types.
GP_NETWORK_SVC_PARAM_TYPE_IP_SINGLE = 'ip_single'
GP_NETWORK_SVC_PARAM_TYPE_IP_POOL = 'ip_pool'
GP_NETWORK_SVC_PARAM_TYPE_STRING = 'string'
# Well-known parameter values.
GP_NETWORK_SVC_PARAM_VALUE_SELF_SUBNET = 'self_subnet'
GP_NETWORK_SVC_PARAM_VALUE_NAT_POOL = 'nat_pool'
|
intgr/django | refs/heads/master | tests/from_db_value/models.py | 26 | import decimal
from django.db import models
class Cash(decimal.Decimal):
    """A Decimal subclass tagged with a currency code."""
    currency = 'USD'

    def __str__(self):
        # BUG FIX: super().__str__ is already bound to this instance, so
        # passing `self` again raised TypeError (extra positional arg).
        s = super().__str__()
        return '%s %s' % (s, self.currency)
class CashField(models.DecimalField):
    """DecimalField that materializes values as Cash instances.

    The field is fixed at 20 digits / 2 decimal places, and each loaded
    value is tagged with the database vendor it came from.
    """

    def __init__(self, **kwargs):
        # Force the precision regardless of what the caller passed.
        kwargs.update(max_digits=20, decimal_places=2)
        super().__init__(**kwargs)

    def from_db_value(self, value, expression, connection, context):
        result = Cash(value)
        result.vendor = connection.vendor
        return result
class CashModel(models.Model):
    """Model exercising CashField's from_db_value conversion."""
    # Loaded values are Cash instances (see CashField.from_db_value).
    cash = CashField()

    def __str__(self):
        """Render the model as its cash amount."""
        return str(self.cash)
|
redondomarco/useradm | refs/heads/master | doc/Mayoral/RPC/Auth.py | 1 | # -*- coding: utf8 -*-
"""
Autenticación en servidor RPC
"""
import Mayoral.RPC.Server
import ldap
from Mayoral.config import config
class RequestHandler(Mayoral.RPC.Server.RequestHandler):
    """XML-RPC request handler that authenticates callers against LDAP.

    NOTE: this is Python 2 code (old-style ``except ..., e`` syntax).
    """
    rpc_paths = ('/RPC2',)
    def setup(self):
        """Record this handler as the active instance and reset auth state."""
        Mayoral.RPC.Server.RequestHandler.setup(self)
        RequestHandler.instance=self
        self.server.user=None
        self.server.userGroups=set()
    def authenticate(self,user, passwd):
        """Bind to LDAP as *user*/*passwd* and check group membership.

        Returns True — storing the credentials and matched groups on the
        server — when the bind succeeds and the user belongs to at least
        one configured allowed group; returns False otherwise.
        """
        # Trust only the configured CA certificate for the TLS connection.
        ldap.set_option(ldap.OPT_X_TLS_CACERTFILE,config['ssl']['CAfile'])
        conn=ldap.initialize(config['ldap']['server'])
        try:
            # A successful simple bind validates the password.
            conn.simple_bind_s("uid=%s,%s" % (user, config['ldap']['authBase']),passwd)
        except ldap.INVALID_CREDENTIALS,e:
            return False
        allowed_groups=config['auth']['allowed_groups']
        user_groups=set()
        for group in allowed_groups:
            try:
                # Membership test: search the group entry for this member DN.
                if conn.search_s('cn=%s,ou=Grupos,o=MCR' % group,ldap.SCOPE_BASE,
                    '(member=uid=%s,ou=People,ou=sap,ou=Servers,o=MCR)' % user):
                    user_groups.add(group)
            except ldap.NO_SUCH_OBJECT:
                # Group not present in the directory — skip it.
                pass
        if len(user_groups)==0:
            return False
        self.server.user = user
        self.server.passwd = passwd
        self.server.userGroups = user_groups
        return True
|
igemsoftware/SYSU-Software2013 | refs/heads/master | project/Python27_32/Lib/lib2to3/fixes/fix_map.py | 327 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer that changes map(F, ...) into list(map(F, ...)) unless there
exists a 'from future_builtins import map' statement in the top-level
namespace.
As a special case, map(None, X) is changed into list(X). (This is
necessary because the semantics are changed in this case -- the new
map(None, X) is equivalent to [(x,) for x in X].)
We avoid the transformation (except for the special case mentioned
above) if the map() call is directly contained in iter(<>), list(<>),
tuple(<>), sorted(<>), ...join(<>), or for V in <>:.
NOTE: This is still not correct if the original code was depending on
map(F, X, Y, ...) to go on until the longest argument is exhausted,
substituting None for missing values -- like zip(), it now stops as
soon as the shortest argument is exhausted.
"""
# Local imports
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, ListComp, in_special_context
from ..pygram import python_symbols as syms
class FixMap(fixer_base.ConditionalFix):
    """lib2to3 fixer wrapping ``map(...)`` calls for Python 3 (see module
    docstring for the full rules)."""
    BM_compatible = True
    # Three alternatives: map(None, X); map(lambda ...: ..., it); any other
    # map(...) call. PATTERN is consumed by the pattern compiler at runtime.
    PATTERN = """
    map_none=power<
        'map'
        trailer< '(' arglist< 'None' ',' arg=any [','] > ')' >
    >
    |
    map_lambda=power<
        'map'
        trailer<
            '('
            arglist<
                lambdef< 'lambda'
                         (fp=NAME | vfpdef< '(' fp=NAME ')'> ) ':' xp=any
                >
                ','
                it=any
            >
            ')'
        >
    >
    |
    power<
        'map' trailer< '(' [arglist=any] ')' >
    >
    """
    skip_on = 'future_builtins.map'
    def transform(self, node, results):
        """Return the replacement node, or None to leave *node* unchanged."""
        if self.should_skip(node):
            return
        if node.parent.type == syms.simple_stmt:
            # A bare map(...) statement is executed only for side effects;
            # wrap in list() to force evaluation, but warn the user.
            self.warning(node, "You should use a for loop here")
            new = node.clone()
            new.prefix = u""
            new = Call(Name(u"list"), [new])
        elif "map_lambda" in results:
            # map(lambda p: expr, it)  ->  [expr for p in it]
            new = ListComp(results["xp"].clone(),
                           results["fp"].clone(),
                           results["it"].clone())
        else:
            if "map_none" in results:
                # map(None, X)  ->  list(X)
                new = results["arg"].clone()
            else:
                if "arglist" in results:
                    args = results["arglist"]
                    if args.type == syms.arglist and \
                       args.children[0].type == token.NAME and \
                       args.children[0].value == "None":
                        # map(None, ...) with several iterables has no safe
                        # equivalent — refuse to transform.
                        self.warning(node, "cannot convert map(None, ...) "
                                     "with multiple arguments because map() "
                                     "now truncates to the shortest sequence")
                        return
                if in_special_context(node):
                    # Already consumed by iter()/list()/for ...: leave as-is.
                    return None
                new = node.clone()
                new.prefix = u""
                new = Call(Name(u"list"), [new])
        # Preserve the original leading whitespace/comments.
        new.prefix = node.prefix
        return new
|
12520054/pybot | refs/heads/master | chatterbot/input/__init__.py | 4 | from .input_adapter import InputAdapter
from .microsoft import Microsoft
from .gitter import Gitter
from .hipchat import HipChat
from .mailgun import Mailgun
from .terminal import TerminalAdapter
from .variable_input_type_adapter import VariableInputTypeAdapter |
arnaud-morvan/QGIS | refs/heads/master | python/plugins/db_manager/db_plugins/plugin.py | 2 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS
Date : May 23, 2011
copyright : (C) 2011 by Giuseppe Sucameli
email : brush.tyler@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from builtins import str
from builtins import range
from qgis.PyQt.QtCore import Qt, QObject, pyqtSignal
from qgis.PyQt.QtWidgets import QApplication, QAction, QMenu, QInputDialog, QMessageBox
from qgis.PyQt.QtGui import QKeySequence, QIcon
from qgis.gui import QgsMessageBar
from qgis.core import Qgis, QgsApplication, QgsSettings
from ..db_plugins import createDbPlugin
class BaseError(Exception):
    """Base class for exceptions in the plugin.

    Accepts either a plain message or another exception, normalizing the
    message to a str and keeping it available as ``self.msg``.
    """

    def __init__(self, e):
        msg = (e.args[0] if e.args else '') if isinstance(e, Exception) else e
        if not isinstance(msg, str):
            # Decode byte strings from utf-8, replacing undecodable bytes.
            msg = str(msg, 'utf-8', 'replace')
        self.msg = msg
        Exception.__init__(self, msg)

    def __unicode__(self):
        return self.msg
class InvalidDataException(BaseError):
    """Error subtype raised for invalid data."""
    pass
class ConnectionError(BaseError):
    """Error subtype for connection failures.

    NOTE(review): this shadows the Python 3 builtin ``ConnectionError``
    within this module; renaming would break existing callers.
    """
    pass
class DbError(BaseError):
    """Database error that optionally carries the offending SQL query."""

    def __init__(self, e, query=None):
        BaseError.__init__(self, e)
        self.query = None if query is None else str(query)

    def __unicode__(self):
        base = BaseError.__unicode__(self)
        if self.query is None:
            return base
        msg = QApplication.translate("DBManagerPlugin", "Error:\n{0}").format(base)
        if self.query:
            msg += QApplication.translate("DBManagerPlugin", "\n\nQuery:\n{0}").format(self.query)
        return msg
class DBPlugin(QObject):
    """Base class for per-backend DB Manager plugins.

    A DBPlugin represents one saved connection; subclasses provide the
    backend specifics (type name, provider, settings key, factories).
    """
    deleted = pyqtSignal()  # emitted after the connection is removed
    changed = pyqtSignal()
    aboutToChange = pyqtSignal()
    def __init__(self, conn_name, parent=None):
        QObject.__init__(self, parent)
        self.connName = conn_name
        # Set by connectToUri(); None until a connection is established.
        self.db = None
    def __del__(self):
        pass  # print "DBPlugin.__del__", self.connName
    def connectionIcon(self):
        return QgsApplication.getThemeIcon("/mIconDbSchema.svg")
    def connectionName(self):
        return self.connName
    def database(self):
        """Return the connected Database object, or None."""
        return self.db
    def info(self):
        from .info_model import DatabaseInfo
        return DatabaseInfo(None)
    def connect(self, parent=None):
        """Open the connection; must be provided by subclasses."""
        raise NotImplementedError('Needs to be implemented by subclasses')
    def connectToUri(self, uri):
        """Create the Database object for *uri*; True on success."""
        self.db = self.databasesFactory(self, uri)
        if self.db:
            return True
        return False
    def reconnect(self):
        """Drop the current Database (if any) and connect again."""
        if self.db is not None:
            uri = self.db.uri()
            self.db.deleteLater()
            self.db = None
            return self.connectToUri(uri)
        return self.connect(self.parent())
    def remove(self):
        """Delete this connection's stored settings and signal deletion."""
        settings = QgsSettings()
        settings.beginGroup(u"/%s/%s" % (self.connectionSettingsKey(), self.connectionName()))
        settings.remove("")
        self.deleted.emit()
        return True
    @classmethod
    def addConnection(self, conn_name, uri):
        raise NotImplementedError('Needs to be implemented by subclasses')
    @classmethod
    def icon(self):
        return None
    @classmethod
    def typeName(self):
        # return the db typename (e.g. 'postgis')
        pass
    @classmethod
    def typeNameString(self):
        # return the db typename string (e.g. 'PostGIS')
        pass
    @classmethod
    def providerName(self):
        # return the provider's name (e.g. 'postgres')
        pass
    @classmethod
    def connectionSettingsKey(self):
        # return the key used to store the connections in settings
        pass
    @classmethod
    def connections(self):
        # get the list of connections
        conn_list = []
        settings = QgsSettings()
        settings.beginGroup(self.connectionSettingsKey())
        for name in settings.childGroups():
            conn_list.append(createDbPlugin(self.typeName(), name))
        settings.endGroup()
        return conn_list
    def databasesFactory(self, connection, uri):
        """Hook: subclasses return their backend-specific Database."""
        return None
    @classmethod
    def addConnectionActionSlot(self, item, action, parent):
        raise NotImplementedError('Needs to be implemented by subclasses')
    def removeActionSlot(self, item, action, parent):
        """GUI slot: confirm with the user, then remove the connection."""
        QApplication.restoreOverrideCursor()
        try:
            res = QMessageBox.question(parent, QApplication.translate("DBManagerPlugin", "DB Manager"),
                                       QApplication.translate("DBManagerPlugin",
                                                              "Really remove connection to {0}?").format(item.connectionName()),
                                       QMessageBox.Yes | QMessageBox.No)
            if res != QMessageBox.Yes:
                return
        finally:
            QApplication.setOverrideCursor(Qt.WaitCursor)
        item.remove()
class DbItemObject(QObject):
    """Base class for items of the DB tree (databases, schemas, tables),
    exposing the Qt signals the GUI uses to track item lifecycle."""
    changed = pyqtSignal()  # emitted after the item's data changed
    aboutToChange = pyqtSignal()  # emitted before a mutating operation
    deleted = pyqtSignal()  # emitted after the item was removed
    def __init__(self, parent=None):
        QObject.__init__(self, parent)
    def database(self):
        """Return the owning Database, or None for unattached items."""
        return None
    def refresh(self):
        self.changed.emit()  # refresh the item data reading them from the db
    def info(self):
        pass
    def runAction(self):
        pass
    def registerActions(self, mainWindow):
        pass
class Database(DbItemObject):
    """A connected database: enumerates schemas/tables, builds SQL-backed
    models and layers, and registers the database-level GUI actions."""
    def __init__(self, dbplugin, uri):
        DbItemObject.__init__(self, dbplugin)
        # Backend-specific connector created by the subclass factory.
        self.connector = self.connectorsFactory(uri)
    def connectorsFactory(self, uri):
        """Hook: subclasses return their backend-specific connector."""
        return None
    def __del__(self):
        self.connector = None
        pass  # print "Database.__del__", self
    def connection(self):
        return self.parent()
    def dbplugin(self):
        return self.parent()
    def database(self):
        return self
    def uri(self):
        return self.connector.uri()
    def publicUri(self):
        return self.connector.publicUri()
    def delete(self):
        """Remove the owning connection; emits deleted on success."""
        self.aboutToChange.emit()
        ret = self.connection().remove()
        if ret is not False:
            self.deleted.emit()
        return ret
    def info(self):
        from .info_model import DatabaseInfo
        return DatabaseInfo(self)
    def sqlResultModel(self, sql, parent):
        from .data_model import SqlResultModel
        return SqlResultModel(self, sql, parent)
    def sqlResultModelAsync(self, sql, parent):
        from .data_model import SqlResultModelAsync
        return SqlResultModelAsync(self, sql, parent)
    def columnUniqueValuesModel(self, col, table, limit=10):
        """Model of (at most *limit*) distinct values of *col* in *table*."""
        l = ""
        if limit is not None:
            l = "LIMIT %d" % limit
        return self.sqlResultModel("SELECT DISTINCT %s FROM %s %s" % (col, table, l), self)
    def uniqueIdFunction(self):
        """Return a SQL function used to generate a unique id for rows of a query"""
        # may be overloaded by derived classes
        return "row_number() over ()"
    def toSqlLayer(self, sql, geomCol, uniqueCol, layerName="QueryLayer", layerType=None, avoidSelectById=False, filter=""):
        """Wrap *sql* as a QGIS vector/raster layer.

        When no unique column is given, the query is wrapped in a subquery
        that synthesizes a ``_uid_`` column via uniqueIdFunction().
        """
        from qgis.core import QgsMapLayer, QgsVectorLayer, QgsRasterLayer
        if uniqueCol is None:
            if hasattr(self, 'uniqueIdFunction'):
                uniqueFct = self.uniqueIdFunction()
                if uniqueFct is not None:
                    # Pick a subquery alias not already used in the SQL.
                    q = 1
                    while "_subq_%d_" % q in sql:
                        q += 1
                    sql = u"SELECT %s AS _uid_,* FROM (%s\n) AS _subq_%d_" % (uniqueFct, sql, q)
                    uniqueCol = "_uid_"
        uri = self.uri()
        uri.setDataSource("", u"(%s\n)" % sql, geomCol, filter, uniqueCol)
        if avoidSelectById:
            uri.disableSelectAtId(True)
        provider = self.dbplugin().providerName()
        if layerType == QgsMapLayer.RasterLayer:
            return QgsRasterLayer(uri.uri(False), layerName, provider)
        return QgsVectorLayer(uri.uri(False), layerName, provider)
    def registerAllActions(self, mainWindow):
        self.registerDatabaseActions(mainWindow)
        self.registerSubPluginActions(mainWindow)
    def registerSubPluginActions(self, mainWindow):
        # load plugins!
        try:
            exec(u"from .%s.plugins import load" % self.dbplugin().typeName(), globals())
        except ImportError:
            # Backend ships no sub-plugins; nothing to register.
            pass
        else:
            load(self, mainWindow)  # NOQA
    def registerDatabaseActions(self, mainWindow):
        """Register the database/schema/table menu actions on *mainWindow*."""
        action = QAction(QApplication.translate("DBManagerPlugin", "&Re-connect"), self)
        mainWindow.registerAction(action, QApplication.translate("DBManagerPlugin", "&Database"),
                                  self.reconnectActionSlot)
        if self.schemas() is not None:
            action = QAction(QApplication.translate("DBManagerPlugin", "&Create Schema…"), self)
            mainWindow.registerAction(action, QApplication.translate("DBManagerPlugin", "&Schema"),
                                      self.createSchemaActionSlot)
            action = QAction(QApplication.translate("DBManagerPlugin", "&Delete (Empty) Schema"), self)
            mainWindow.registerAction(action, QApplication.translate("DBManagerPlugin", "&Schema"),
                                      self.deleteSchemaActionSlot)
        action = QAction(QApplication.translate("DBManagerPlugin", "Delete Selected Item"), self)
        mainWindow.registerAction(action, None, self.deleteActionSlot)
        action.setShortcuts(QKeySequence.Delete)
        action = QAction(QgsApplication.getThemeIcon("/mActionCreateTable.svg"),
                         QApplication.translate("DBManagerPlugin", "&Create Table…"), self)
        mainWindow.registerAction(action, QApplication.translate("DBManagerPlugin", "&Table"),
                                  self.createTableActionSlot)
        action = QAction(QgsApplication.getThemeIcon("/mActionEditTable.svg"),
                         QApplication.translate("DBManagerPlugin", "&Edit Table…"), self)
        mainWindow.registerAction(action, QApplication.translate("DBManagerPlugin", "&Table"), self.editTableActionSlot)
        action = QAction(QgsApplication.getThemeIcon("/mActionDeleteTable.svg"),
                         QApplication.translate("DBManagerPlugin", "&Delete Table/View…"), self)
        mainWindow.registerAction(action, QApplication.translate("DBManagerPlugin", "&Table"),
                                  self.deleteTableActionSlot)
        action = QAction(QApplication.translate("DBManagerPlugin", "&Empty Table…"), self)
        mainWindow.registerAction(action, QApplication.translate("DBManagerPlugin", "&Table"),
                                  self.emptyTableActionSlot)
        if self.schemas() is not None:
            action = QAction(QApplication.translate("DBManagerPlugin", "&Move to Schema"), self)
            action.setMenu(QMenu(mainWindow))
            def invoke_callback():
                return mainWindow.invokeCallback(self.prepareMenuMoveTableToSchemaActionSlot)
            action.menu().aboutToShow.connect(invoke_callback)
            mainWindow.registerAction(action, QApplication.translate("DBManagerPlugin", "&Table"))
    def reconnectActionSlot(self, item, action, parent):
        db = item.database()
        db.connection().reconnect()
        db.refresh()
    def deleteActionSlot(self, item, action, parent):
        """Dispatch deletion to the schema/table slot matching *item*."""
        if isinstance(item, Schema):
            self.deleteSchemaActionSlot(item, action, parent)
        elif isinstance(item, Table):
            self.deleteTableActionSlot(item, action, parent)
        else:
            QApplication.restoreOverrideCursor()
            parent.infoBar.pushMessage(QApplication.translate("DBManagerPlugin", "Cannot delete the selected item."),
                                       Qgis.Info, parent.iface.messageTimeout())
            QApplication.setOverrideCursor(Qt.WaitCursor)
    def createSchemaActionSlot(self, item, action, parent):
        """GUI slot: prompt for a schema name and create it."""
        QApplication.restoreOverrideCursor()
        try:
            if not isinstance(item, (DBPlugin, Schema, Table)) or item.database() is None:
                parent.infoBar.pushMessage(
                    QApplication.translate("DBManagerPlugin", "No database selected or you are not connected to it."),
                    Qgis.Info, parent.iface.messageTimeout())
                return
            (schema, ok) = QInputDialog.getText(parent, QApplication.translate("DBManagerPlugin", "New schema"),
                                                QApplication.translate("DBManagerPlugin", "Enter new schema name"))
            if not ok:
                return
        finally:
            QApplication.setOverrideCursor(Qt.WaitCursor)
        self.createSchema(schema)
    def deleteSchemaActionSlot(self, item, action, parent):
        """GUI slot: confirm with the user, then delete the (empty) schema."""
        QApplication.restoreOverrideCursor()
        try:
            if not isinstance(item, Schema):
                parent.infoBar.pushMessage(
                    QApplication.translate("DBManagerPlugin", "Select an empty schema for deletion."),
                    Qgis.Info, parent.iface.messageTimeout())
                return
            res = QMessageBox.question(parent, QApplication.translate("DBManagerPlugin", "DB Manager"),
                                       QApplication.translate("DBManagerPlugin",
                                                              "Really delete schema {0}?").format(item.name),
                                       QMessageBox.Yes | QMessageBox.No)
            if res != QMessageBox.Yes:
                return
        finally:
            QApplication.setOverrideCursor(Qt.WaitCursor)
        item.delete()
    def schemasFactory(self, row, db):
        """Hook: subclasses build a Schema from a connector row."""
        return None
    def schemas(self):
        """List of Schema objects, or None for backends without schemas."""
        schemas = self.connector.getSchemas()
        if schemas is not None:
            schemas = [self.schemasFactory(x, self) for x in schemas]
        return schemas
    def createSchema(self, name):
        self.connector.createSchema(name)
        self.refresh()
    def createTableActionSlot(self, item, action, parent):
        """GUI slot: open the create-table dialog."""
        QApplication.restoreOverrideCursor()
        if not hasattr(item, 'database') or item.database() is None:
            parent.infoBar.pushMessage(
                QApplication.translate("DBManagerPlugin", "No database selected or you are not connected to it."),
                Qgis.Info, parent.iface.messageTimeout())
            return
        from ..dlg_create_table import DlgCreateTable
        DlgCreateTable(item, parent).exec_()
        QApplication.setOverrideCursor(Qt.WaitCursor)
    def editTableActionSlot(self, item, action, parent):
        """GUI slot: open the table-properties dialog for a table."""
        QApplication.restoreOverrideCursor()
        try:
            if not isinstance(item, Table) or item.isView:
                parent.infoBar.pushMessage(QApplication.translate("DBManagerPlugin", "Select a table to edit."),
                                           Qgis.Info, parent.iface.messageTimeout())
                return
            from ..dlg_table_properties import DlgTableProperties
            DlgTableProperties(item, parent).exec_()
        finally:
            QApplication.setOverrideCursor(Qt.WaitCursor)
    def deleteTableActionSlot(self, item, action, parent):
        """GUI slot: confirm with the user, then delete the table/view."""
        QApplication.restoreOverrideCursor()
        try:
            if not isinstance(item, Table):
                parent.infoBar.pushMessage(
                    QApplication.translate("DBManagerPlugin", "Select a table/view for deletion."),
                    Qgis.Info, parent.iface.messageTimeout())
                return
            res = QMessageBox.question(parent, QApplication.translate("DBManagerPlugin", "DB Manager"),
                                       QApplication.translate("DBManagerPlugin",
                                                              "Really delete table/view {0}?").format(item.name),
                                       QMessageBox.Yes | QMessageBox.No)
            if res != QMessageBox.Yes:
                return
        finally:
            QApplication.setOverrideCursor(Qt.WaitCursor)
        item.delete()
    def emptyTableActionSlot(self, item, action, parent):
        """GUI slot: confirm with the user, then delete all rows."""
        QApplication.restoreOverrideCursor()
        try:
            if not isinstance(item, Table) or item.isView:
                parent.infoBar.pushMessage(QApplication.translate("DBManagerPlugin", "Select a table to empty it."),
                                           Qgis.Info, parent.iface.messageTimeout())
                return
            res = QMessageBox.question(parent, QApplication.translate("DBManagerPlugin", "DB Manager"),
                                       QApplication.translate("DBManagerPlugin",
                                                              "Really delete all items from table {0}?").format(item.name),
                                       QMessageBox.Yes | QMessageBox.No)
            if res != QMessageBox.Yes:
                return
        finally:
            QApplication.setOverrideCursor(Qt.WaitCursor)
        item.empty()
    def prepareMenuMoveTableToSchemaActionSlot(self, item, menu, mainWindow):
        """ populate menu with schemas """
        def slot(x):
            # Bind x per-entry so each menu action targets its own schema.
            return lambda: mainWindow.invokeCallback(self.moveTableToSchemaActionSlot, x)
        menu.clear()
        for schema in self.schemas():
            menu.addAction(schema.name, slot(schema))
    def moveTableToSchemaActionSlot(self, item, action, parent, new_schema):
        QApplication.restoreOverrideCursor()
        try:
            if not isinstance(item, Table):
                parent.infoBar.pushMessage(QApplication.translate("DBManagerPlugin", "Select a table/view."),
                                           Qgis.Info, parent.iface.messageTimeout())
                return
        finally:
            QApplication.setOverrideCursor(Qt.WaitCursor)
        item.moveToSchema(new_schema)
    def tablesFactory(self, row, db, schema=None):
        """Build a table item from a connector row; dispatch on row type."""
        typ, row = row[0], row[1:]
        if typ == Table.VectorType:
            return self.vectorTablesFactory(row, db, schema)
        elif typ == Table.RasterType:
            return self.rasterTablesFactory(row, db, schema)
        return self.dataTablesFactory(row, db, schema)
    def dataTablesFactory(self, row, db, schema=None):
        return None
    def vectorTablesFactory(self, row, db, schema=None):
        return None
    def rasterTablesFactory(self, row, db, schema=None):
        return None
    def tables(self, schema=None, sys_tables=False):
        """List of table items in *schema* (all schemas when None)."""
        tables = self.connector.getTables(schema.name if schema else None, sys_tables)
        if tables is not None:
            tables = [self.tablesFactory(x, self, schema) for x in tables]
        return tables
    def createTable(self, table, fields, schema=None):
        """Create *table* with *fields*; first primary-key field wins."""
        field_defs = [x.definition() for x in fields]
        pkeys = [x for x in fields if x.primaryKey]
        pk_name = pkeys[0].name if len(pkeys) > 0 else None
        ret = self.connector.createTable((schema, table), field_defs, pk_name)
        if ret is not False:
            self.refresh()
        return ret
    def createVectorTable(self, table, fields, geom, schema=None):
        """Create a table and optionally add a geometry column/index.

        *geom* is (col, type, srid, dim[, createSpatialIndex]) or None.
        """
        ret = self.createTable(table, fields, schema)
        if not ret:
            return False
        try:
            createGeomCol = geom is not None
            if createGeomCol:
                geomCol, geomType, geomSrid, geomDim = geom[:4]
                createSpatialIndex = geom[4] if len(geom) > 4 else False
                self.connector.addGeometryColumn((schema, table), geomCol, geomType, geomSrid, geomDim)
                if createSpatialIndex:
                    # commit data definition changes, otherwise index can't be built
                    self.connector._commit()
                    self.connector.createSpatialIndex((schema, table), geomCol)
        finally:
            self.refresh()
        return True
    def explicitSpatialIndex(self):
        return False
    def spatialIndexClause(self, src_table, src_column, dest_table, dest_table_column):
        return None
    def hasLowercaseFieldNamesOption(self):
        return False
class Schema(DbItemObject):
    """A database schema; parent is the owning Database."""
    def __init__(self, db):
        DbItemObject.__init__(self, db)
        # Metadata filled in by the backend subclass from connector rows.
        self.oid = self.name = self.owner = self.perms = None
        self.comment = None
        self.tableCount = 0
    def __del__(self):
        pass  # print "Schema.__del__", self
    def database(self):
        return self.parent()
    def schema(self):
        return self
    def tables(self):
        """Table items contained in this schema."""
        return self.database().tables(self)
    def delete(self):
        """Drop the schema; emits deleted on success."""
        self.aboutToChange.emit()
        ret = self.database().connector.deleteSchema(self.name)
        if ret is not False:
            self.deleted.emit()
        return ret
    def rename(self, new_name):
        """Rename the schema and refresh on success."""
        self.aboutToChange.emit()
        ret = self.database().connector.renameSchema(self.name, new_name)
        if ret is not False:
            self.name = new_name
            self.refresh()
        return ret
    def info(self):
        from .info_model import SchemaInfo
        return SchemaInfo(self)
class Table(DbItemObject):
TableType, VectorType, RasterType = list(range(3))
def __init__(self, db, schema=None, parent=None):
DbItemObject.__init__(self, db)
self._schema = schema
if hasattr(self, 'type'):
return
self.type = Table.TableType
self.name = self.isView = self.owner = self.pages = None
self.comment = None
self.rowCount = None
self._fields = self._indexes = self._constraints = self._triggers = self._rules = None
def __del__(self):
pass # print "Table.__del__", self
def canBeAddedToCanvas(self):
return True
def database(self):
return self.parent()
def schema(self):
return self._schema
def schemaName(self):
return self.schema().name if self.schema() else None
def quotedName(self):
return self.database().connector.quoteId((self.schemaName(), self.name))
def delete(self):
self.aboutToChange.emit()
if self.isView:
ret = self.database().connector.deleteView((self.schemaName(), self.name))
else:
ret = self.database().connector.deleteTable((self.schemaName(), self.name))
if ret is not False:
self.deleted.emit()
return ret
def rename(self, new_name):
self.aboutToChange.emit()
ret = self.database().connector.renameTable((self.schemaName(), self.name), new_name)
if ret is not False:
self.name = new_name
self.refresh()
return ret
def empty(self):
self.aboutToChange.emit()
ret = self.database().connector.emptyTable((self.schemaName(), self.name))
if ret is not False:
self.refreshRowCount()
return ret
def moveToSchema(self, schema):
self.aboutToChange.emit()
if self.schema() == schema:
return True
ret = self.database().connector.moveTableToSchema((self.schemaName(), self.name), schema.name)
if ret is not False:
self.schema().refresh()
schema.refresh()
return ret
def info(self):
from .info_model import TableInfo
return TableInfo(self)
def uri(self):
uri = self.database().uri()
schema = self.schemaName() if self.schemaName() else ''
geomCol = self.geomColumn if self.type in [Table.VectorType, Table.RasterType] else ""
uniqueCol = self.getValidQgisUniqueFields(True) if self.isView else None
uri.setDataSource(schema, self.name, geomCol if geomCol else None, None, uniqueCol.name if uniqueCol else "")
return uri
def mimeUri(self):
layerType = "raster" if self.type == Table.RasterType else "vector"
return u"%s:%s:%s:%s" % (layerType, self.database().dbplugin().providerName(), self.name, self.uri().uri(False))
def toMapLayer(self):
from qgis.core import QgsVectorLayer, QgsRasterLayer
provider = self.database().dbplugin().providerName()
uri = self.uri().uri(False)
if self.type == Table.RasterType:
return QgsRasterLayer(uri, self.name, provider)
return QgsVectorLayer(uri, self.name, provider)
def getValidQgisUniqueFields(self, onlyOne=False):
""" list of fields valid to load the table as layer in Qgis canvas.
Qgis automatically search for a valid unique field, so it's
needed only for queries and views """
ret = []
# add the pk
pkcols = [x for x in self.fields() if x.primaryKey]
if len(pkcols) == 1:
ret.append(pkcols[0])
# then add both oid, serial and int fields with an unique index
indexes = self.indexes()
if indexes is not None:
for idx in indexes:
if idx.isUnique and len(idx.columns) == 1:
fld = idx.fields()[idx.columns[0]]
if fld.dataType in ["oid", "serial", "int4", "int8"] and fld not in ret:
ret.append(fld)
# and finally append the other suitable fields
for fld in self.fields():
if fld.dataType in ["oid", "serial", "int4", "int8"] and fld not in ret:
ret.append(fld)
if onlyOne:
return ret[0] if len(ret) > 0 else None
return ret
def tableDataModel(self, parent):
pass
def tableFieldsFactory(self, row, table):
raise NotImplementedError('Needs to be implemented by subclasses')
def fields(self):
if self._fields is None:
fields = self.database().connector.getTableFields((self.schemaName(), self.name))
if fields is not None:
self._fields = [self.tableFieldsFactory(x, self) for x in fields]
return self._fields
def refreshFields(self):
self._fields = None # refresh table fields
self.refresh()
def addField(self, fld):
self.aboutToChange.emit()
ret = self.database().connector.addTableColumn((self.schemaName(), self.name), fld.definition())
if ret is not False:
self.refreshFields()
return ret
def deleteField(self, fld):
self.aboutToChange.emit()
ret = self.database().connector.deleteTableColumn((self.schemaName(), self.name), fld.name)
if ret is not False:
self.refreshFields()
self.refreshConstraints()
self.refreshIndexes()
return ret
def addGeometryColumn(self, geomCol, geomType, srid, dim, createSpatialIndex=False):
self.aboutToChange.emit()
ret = self.database().connector.addGeometryColumn((self.schemaName(), self.name), geomCol, geomType, srid, dim)
if not ret:
return False
try:
if createSpatialIndex:
# commit data definition changes, otherwise index can't be built
self.database().connector._commit()
self.database().connector.createSpatialIndex((self.schemaName(), self.name), geomCol)
finally:
self.schema().refresh() if self.schema() else self.database().refresh() # another table was added
return True
def tableConstraintsFactory(self):
return None
def constraints(self):
if self._constraints is None:
constraints = self.database().connector.getTableConstraints((self.schemaName(), self.name))
if constraints is not None:
self._constraints = [self.tableConstraintsFactory(x, self) for x in constraints]
return self._constraints
def refreshConstraints(self):
self._constraints = None # refresh table constraints
self.refresh()
def addConstraint(self, constr):
self.aboutToChange.emit()
if constr.type == TableConstraint.TypePrimaryKey:
ret = self.database().connector.addTablePrimaryKey((self.schemaName(), self.name),
constr.fields()[constr.columns[0]].name)
elif constr.type == TableConstraint.TypeUnique:
ret = self.database().connector.addTableUniqueConstraint((self.schemaName(), self.name),
constr.fields()[constr.columns[0]].name)
else:
return False
if ret is not False:
self.refreshConstraints()
return ret
def deleteConstraint(self, constr):
self.aboutToChange.emit()
ret = self.database().connector.deleteTableConstraint((self.schemaName(), self.name), constr.name)
if ret is not False:
self.refreshConstraints()
return ret
def tableIndexesFactory(self):
return None
def indexes(self):
if self._indexes is None:
indexes = self.database().connector.getTableIndexes((self.schemaName(), self.name))
if indexes is not None:
self._indexes = [self.tableIndexesFactory(x, self) for x in indexes]
return self._indexes
def refreshIndexes(self):
self._indexes = None # refresh table indexes
self.refresh()
def addIndex(self, idx):
self.aboutToChange.emit()
ret = self.database().connector.createTableIndex((self.schemaName(), self.name), idx.name,
idx.fields()[idx.columns[0]].name)
if ret is not False:
self.refreshIndexes()
return ret
def deleteIndex(self, idx):
self.aboutToChange.emit()
ret = self.database().connector.deleteTableIndex((self.schemaName(), self.name), idx.name)
if ret is not False:
self.refreshIndexes()
return ret
def tableTriggersFactory(self, row, table):
return None
def triggers(self):
if self._triggers is None:
triggers = self.database().connector.getTableTriggers((self.schemaName(), self.name))
if triggers is not None:
self._triggers = [self.tableTriggersFactory(x, self) for x in triggers]
return self._triggers
def refreshTriggers(self):
self._triggers = None # refresh table triggers
self.refresh()
def tableRulesFactory(self, row, table):
return None
def rules(self):
if self._rules is None:
rules = self.database().connector.getTableRules((self.schemaName(), self.name))
if rules is not None:
self._rules = [self.tableRulesFactory(x, self) for x in rules]
return self._rules
def refreshRules(self):
self._rules = None # refresh table rules
self.refresh()
def refreshRowCount(self):
self.aboutToChange.emit()
prevRowCount = self.rowCount
try:
self.rowCount = self.database().connector.getTableRowCount((self.schemaName(), self.name))
self.rowCount = int(self.rowCount) if self.rowCount is not None else None
except DbError:
self.rowCount = None
if self.rowCount != prevRowCount:
self.refresh()
def runAction(self, action):
    """Dispatch a generic table action string.

    Supported actions:
      - "rows/count": refresh the cached row count;
      - "triggers/<enable|disable>": toggle all triggers (asks the user);
      - "trigger/<name>/<delete|enable|disable>": act on one trigger.

    Returns True when the action was handled, False otherwise.
    """
    action = str(action)
    if action.startswith("rows/"):
        if action == "rows/count":
            self.refreshRowCount()
            return True
    elif action.startswith("triggers/"):
        parts = action.split('/')
        trigger_action = parts[1]
        msg = QApplication.translate("DBManagerPlugin", "Do you want to {0} all triggers?").format(trigger_action)
        # Show the confirmation with a normal cursor, then restore the
        # wait cursor whatever the user answers.
        QApplication.restoreOverrideCursor()
        try:
            if QMessageBox.question(None, QApplication.translate("DBManagerPlugin", "Table triggers"), msg,
                                    QMessageBox.Yes | QMessageBox.No) == QMessageBox.No:
                return False
        finally:
            QApplication.setOverrideCursor(Qt.WaitCursor)
        if trigger_action == "enable" or trigger_action == "disable":
            enable = trigger_action == "enable"
            self.aboutToChange.emit()
            self.database().connector.enableAllTableTriggers(enable, (self.schemaName(), self.name))
            self.refreshTriggers()
            return True
    elif action.startswith("trigger/"):
        parts = action.split('/')
        trigger_name = parts[1]
        trigger_action = parts[2]
        msg = QApplication.translate("DBManagerPlugin", "Do you want to {0} trigger {1}?").format(
            trigger_action, trigger_name)
        # Same cursor dance as above for the single-trigger confirmation.
        QApplication.restoreOverrideCursor()
        try:
            if QMessageBox.question(None, QApplication.translate("DBManagerPlugin", "Table trigger"), msg,
                                    QMessageBox.Yes | QMessageBox.No) == QMessageBox.No:
                return False
        finally:
            QApplication.setOverrideCursor(Qt.WaitCursor)
        if trigger_action == "delete":
            self.aboutToChange.emit()
            self.database().connector.deleteTableTrigger(trigger_name, (self.schemaName(), self.name))
            self.refreshTriggers()
            return True
        elif trigger_action == "enable" or trigger_action == "disable":
            enable = trigger_action == "enable"
            self.aboutToChange.emit()
            self.database().connector.enableTableTrigger(trigger_name, enable, (self.schemaName(), self.name))
            self.refreshTriggers()
            return True
    return False
class VectorTable(Table):
    """A table with a geometry column (usable as a vector layer source)."""

    def __init__(self, db, schema=None, parent=None):
        # With multiple inheritance the base constructor may already have
        # run; only call it once.
        if not hasattr(self, 'type'):  # check if the superclass constructor was called yet!
            Table.__init__(self, db, schema, parent)
        self.type = Table.VectorType
        # Geometry metadata, populated by database-specific subclasses.
        self.geomColumn = self.geomType = self.geomDim = self.srid = None
        self.estimatedExtent = self.extent = None

    def info(self):
        """Return a VectorTableInfo view of this table's metadata."""
        from .info_model import VectorTableInfo
        return VectorTableInfo(self)

    def hasSpatialIndex(self, geom_column=None):
        """Return True if *geom_column* (default: the table's geometry
        column) is covered by one of the table's indexes.

        BUGFIX: the original code initialized ``fld = None`` before the
        loop and tested ``fld is None`` afterwards, but a for-loop variable
        keeps its last value when the loop finishes without ``break`` — so
        a missing geometry column left ``fld`` bound to the LAST field and
        an unrelated field's index could be reported.  The for/else clause
        makes the not-found case (including an empty field list) reliably
        return False.
        """
        geom_column = geom_column if geom_column is not None else self.geomColumn
        for fld in self.fields():
            if fld.name == geom_column:
                break
        else:
            # Geometry column not present (or the table has no fields).
            return False
        for idx in self.indexes():
            if fld.num in idx.columns:
                return True
        return False

    def createSpatialIndex(self, geom_column=None):
        """Create a spatial index on *geom_column* (default: geomColumn);
        refresh the cached index list on success."""
        self.aboutToChange.emit()
        geom_column = geom_column if geom_column is not None else self.geomColumn
        ret = self.database().connector.createSpatialIndex((self.schemaName(), self.name), geom_column)
        if ret is not False:
            self.refreshIndexes()
        return ret

    def deleteSpatialIndex(self, geom_column=None):
        """Drop the spatial index on *geom_column* (default: geomColumn);
        refresh the cached index list on success."""
        self.aboutToChange.emit()
        geom_column = geom_column if geom_column is not None else self.geomColumn
        ret = self.database().connector.deleteSpatialIndex((self.schemaName(), self.name), geom_column)
        if ret is not False:
            self.refreshIndexes()
        return ret

    def refreshTableExtent(self):
        """Re-query the exact extent; None on DbError.  Refresh views only
        when the value changed."""
        prevExtent = self.extent
        try:
            self.extent = self.database().connector.getTableExtent((self.schemaName(), self.name), self.geomColumn)
        except DbError:
            self.extent = None
        if self.extent != prevExtent:
            self.refresh()

    def refreshTableEstimatedExtent(self):
        """Re-query the estimated extent; None on DbError.  Refresh views
        only when the value changed."""
        prevEstimatedExtent = self.estimatedExtent
        try:
            self.estimatedExtent = self.database().connector.getTableEstimatedExtent(
                (self.schemaName(), self.name), self.geomColumn)
        except DbError:
            self.estimatedExtent = None
        if self.estimatedExtent != prevEstimatedExtent:
            self.refresh()

    def runAction(self, action):
        """Handle vector-specific actions ("spatialindex/...", "extent/...");
        everything else falls back to Table.runAction()."""
        action = str(action)
        if action.startswith("spatialindex/"):
            parts = action.split('/')
            spatialIndex_action = parts[1]
            msg = QApplication.translate("DBManagerPlugin", "Do you want to {0} spatial index for field {1}?").format(
                spatialIndex_action, self.geomColumn)
            # Show the confirmation with a normal cursor, then restore the
            # wait cursor whatever the user answers.
            QApplication.restoreOverrideCursor()
            try:
                if QMessageBox.question(None, QApplication.translate("DBManagerPlugin", "Spatial Index"), msg,
                                        QMessageBox.Yes | QMessageBox.No) == QMessageBox.No:
                    return False
            finally:
                QApplication.setOverrideCursor(Qt.WaitCursor)
            if spatialIndex_action == "create":
                self.createSpatialIndex()
                return True
            elif spatialIndex_action == "delete":
                self.deleteSpatialIndex()
                return True
        if action.startswith("extent/"):
            if action == "extent/get":
                self.refreshTableExtent()
                return True
            if action == "extent/estimated/get":
                self.refreshTableEstimatedExtent()
                return True
        return Table.runAction(self, action)
class RasterTable(Table):
    """A table holding raster data."""

    def __init__(self, db, schema=None, parent=None):
        # With multiple inheritance the base constructor may already have
        # run; only call it once.
        if not hasattr(self, 'type'):
            Table.__init__(self, db, schema, parent)
        self.type = Table.RasterType
        # Raster metadata, populated by database-specific subclasses.
        self.geomColumn = None
        self.geomType = None
        self.pixelSizeX = None
        self.pixelSizeY = None
        self.pixelType = None
        self.isExternal = None
        self.srid = None
        self.extent = None

    def info(self):
        """Return a RasterTableInfo view of this table's metadata."""
        from .info_model import RasterTableInfo
        return RasterTableInfo(self)
class TableSubItemObject(QObject):
    """Base class for objects owned by a table (fields, constraints, ...)."""

    def __init__(self, table):
        # The owning table becomes the Qt parent, so the item is destroyed
        # together with its table.
        QObject.__init__(self, table)

    def table(self):
        """Return the owning table (the Qt parent object)."""
        return self.parent()

    def database(self):
        """Return the owning table's database, or None when detached."""
        owner = self.table()
        return owner.database() if owner else None
class TableField(TableSubItemObject):
    """A column of a table: metadata plus rename/alter operations."""

    def __init__(self, table):
        TableSubItemObject.__init__(self, table)
        # Column metadata, filled in by the database-specific field factory.
        self.num = self.name = self.dataType = self.modifier = self.notNull = self.default = self.hasDefault = self.primaryKey = None
        self.comment = None

    def type2String(self):
        """Return the data type, with the modifier in parentheses when set
        (a modifier of None or -1 means "no modifier")."""
        if self.modifier is None or self.modifier == -1:
            return u"%s" % self.dataType
        return u"%s (%s)" % (self.dataType, self.modifier)

    def default2String(self):
        """Return the default value as SQL text ('' when there is none)."""
        if not self.hasDefault:
            return ''
        return self.default if self.default is not None else "NULL"

    def definition(self):
        """Return the column's SQL definition fragment,
        e.g. '"name" text NOT NULL DEFAULT ...'."""
        from .connector import DBConnector
        # Use the live connector's identifier quoting when attached to a
        # database; otherwise fall back to the generic implementation.
        quoteIdFunc = self.database().connector.quoteId if self.database() else DBConnector.quoteId

        name = quoteIdFunc(self.name)
        not_null = "NOT NULL" if self.notNull else ""

        txt = u"%s %s %s" % (name, self.type2String(), not_null)
        if self.hasDefault:
            txt += u" DEFAULT %s" % self.default2String()
        return txt

    def delete(self):
        """Drop this column from the owning table."""
        return self.table().deleteField(self)

    def rename(self, new_name):
        """Rename the column (shortcut for update() with only a new name)."""
        return self.update(new_name)

    def update(self, new_name, new_type_str=None, new_not_null=None, new_default_str=None):
        """Alter the column.

        Arguments equal to the current values are normalized to None so the
        connector only changes what actually differs.  Refreshes the table's
        field cache on success and returns the connector result.
        """
        self.table().aboutToChange.emit()
        if self.name == new_name:
            new_name = None
        if self.type2String() == new_type_str:
            new_type_str = None
        if self.notNull == new_not_null:
            new_not_null = None
        if self.default2String() == new_default_str:
            new_default_str = None

        ret = self.table().database().connector.updateTableColumn((self.table().schemaName(), self.table().name),
                                                                  self.name, new_name, new_type_str, new_not_null,
                                                                  new_default_str)
        if ret is not False:
            self.table().refreshFields()
        return ret
class TableConstraint(TableSubItemObject):
    """ class that represents a constraint of a table (relation) """

    TypeCheck, TypeForeignKey, TypePrimaryKey, TypeUnique, TypeExclusion, TypeUnknown = list(range(6))
    types = {"c": TypeCheck, "f": TypeForeignKey, "p": TypePrimaryKey, "u": TypeUnique, "x": TypeExclusion}

    onAction = {"a": "NO ACTION", "r": "RESTRICT", "c": "CASCADE", "n": "SET NULL", "d": "SET DEFAULT"}
    matchTypes = {"u": "UNSPECIFIED", "f": "FULL", "p": "PARTIAL", "s": "SIMPLE"}

    def __init__(self, table):
        TableSubItemObject.__init__(self, table)
        self.name = self.type = self.columns = None

    def type2String(self):
        """Return the constraint type as a translated, human-readable label."""
        # Translate at call time (not at class-definition time) so the
        # active Qt translator is honoured.
        if self.type == TableConstraint.TypeCheck:
            label = "Check"
        elif self.type == TableConstraint.TypePrimaryKey:
            label = "Primary key"
        elif self.type == TableConstraint.TypeForeignKey:
            label = "Foreign key"
        elif self.type == TableConstraint.TypeUnique:
            label = "Unique"
        elif self.type == TableConstraint.TypeExclusion:
            label = "Exclusion"
        else:
            label = 'Unknown'
        return QApplication.translate("DBManagerPlugin", label)

    def fields(self):
        """Map each referenced column number to its TableField
        (None when no field with that number exists)."""
        all_fields = self.table().fields()
        return {
            num: next((fld for fld in all_fields if fld.num == num), None)
            for num in self.columns
        }

    def delete(self):
        """Drop this constraint from the owning table."""
        return self.table().deleteConstraint(self)
class TableIndex(TableSubItemObject):
    """An index defined on a table."""

    def __init__(self, table):
        TableSubItemObject.__init__(self, table)
        self.name = self.columns = self.isUnique = None

    def fields(self):
        """Map each indexed column number to its TableField
        (None when no field with that number exists)."""
        all_fields = self.table().fields()
        return {
            num: next((fld for fld in all_fields if fld.num == num), None)
            for num in self.columns
        }

    def delete(self):
        """Drop this index from the owning table."""
        return self.table().deleteIndex(self)
class TableTrigger(TableSubItemObject):
    """ class that represents a trigger """

    # Bits within tgtype (pg_trigger.h)
    TypeRow = (1 << 0)  # row or statement
    TypeBefore = (1 << 1)  # before or after
    # events: one or more
    TypeInsert = (1 << 2)
    TypeDelete = (1 << 3)
    TypeUpdate = (1 << 4)
    TypeTruncate = (1 << 5)

    def __init__(self, table):
        TableSubItemObject.__init__(self, table)
        # CONSISTENCY FIX: also initialize `type` — it is read by
        # type2String() below, and the sibling classes (TableConstraint,
        # TableIndex, TableRule) initialize every attribute they use.
        self.name = self.function = self.type = None

    def type2String(self):
        """Render the tgtype bitmask as text,
        e.g. 'Before INSERT UPDATE \\nfor each row'."""
        trig_type = u''
        trig_type += "Before " if self.type & TableTrigger.TypeBefore else "After "
        if self.type & TableTrigger.TypeInsert:
            trig_type += "INSERT "
        if self.type & TableTrigger.TypeUpdate:
            trig_type += "UPDATE "
        if self.type & TableTrigger.TypeDelete:
            trig_type += "DELETE "
        if self.type & TableTrigger.TypeTruncate:
            trig_type += "TRUNCATE "
        trig_type += "\n"
        trig_type += "for each "
        trig_type += "row" if self.type & TableTrigger.TypeRow else "statement"
        return trig_type
class TableRule(TableSubItemObject):
    """A rewrite rule attached to a table."""

    def __init__(self, table):
        TableSubItemObject.__init__(self, table)
        self.name = None
        self.definition = None
|
chdecultot/erpnext | refs/heads/develop | erpnext/healthcare/doctype/healthcare_service_unit_type/test_healthcare_service_unit_type.py | 14 | # -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import unittest
class TestHealthcareServiceUnitType(unittest.TestCase):
    # Placeholder test case: no behavior of the Healthcare Service Unit
    # Type doctype is verified yet.
    pass
|
luotao1/Paddle | refs/heads/develop | python/paddle/optimizer/optimizer.py | 1 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import six
import logging
from collections import defaultdict
import paddle
from paddle.fluid.distribute_lookup_table import find_distributed_lookup_table
from paddle.fluid.framework import Program, Variable, name_scope, default_main_program, default_startup_program, device_guard
from ..fluid import framework
from ..fluid import layers
from ..fluid import unique_name
from ..fluid.backward import append_backward, _some_in_set_, _append_grad_suffix_, _get_no_grad_set_name
from ..fluid.clip import GradientClipBase, GradientClipByNorm, error_clip_callback, append_gradient_clip_ops
from ..fluid.framework import program_guard
from ..fluid.initializer import Constant
from ..fluid.layer_helper import LayerHelper
from ..fluid.layers import ops
from ..fluid.regularizer import append_regularization_ops
from ..fluid.dygraph import base as imperative_base
from ..fluid.dygraph import no_grad
from paddle.fluid import core
from paddle.fluid.layers import tensor
from functools import reduce
from ..fluid.wrapped_decorator import signature_safe_contextmanager
from .. import compat as cpt
from .lr import LRScheduler
__all__ = ['Optimizer']
class Optimizer(object):
r"""Optimizer Base class.
Define the common interface of an optimizer.
User should not use this class directly,
but need to use one of it's implementation.
Args:
learning_rate (float|LRScheduler): The learning rate used to update ``Parameter``.
It can be a float value or any subclass of ``LRScheduler`` .
parameters (list, optional): List of ``Tensor`` names to update to minimize ``loss``. \
This parameter is required in dygraph mode. \
The default value is None in static mode, at this time all parameters will be updated.
weight_decay (float|WeightDecayRegularizer, optional): The strategy of regularization. \
It can be a float value as the coefficient of L2 regularization or \
:ref:`api_fluid_regularizer_L1Decay`, :ref:`api_fluid_regularizer_L2Decay`.
If a parameter has set regularizer using :ref:`api_fluid_ParamAttr` already, \
the regularization setting here in optimizer will be ignored for this parameter. \
Otherwise, the regularization setting here in optimizer will take effect. \
Default None, meaning there is no regularization.
grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of \
some derived class of ``GradientClipBase`` . There are three clipping strategies \
( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` , \
:ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
name (str, optional): Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`.
The default value is None.
Returns:
Base class for optimizer.
Examples:
.. code-block:: python
#Take the subclass adam as an example
import paddle
linear = paddle.nn.Linear(10, 10)
inp = paddle.uniform(shape=[10, 10], min=-0.1, max=0.1)
out = linear(inp)
loss = paddle.mean(out)
adam = paddle.optimizer.Adam(learning_rate=0.1,
parameters=linear.parameters())
out.backward()
adam.step()
adam.clear_grad()
"""
@imperative_base.no_grad
def __init__(self,
             learning_rate,
             parameters=None,
             weight_decay=None,
             grad_clip=None,
             name=None):
    """Validate the arguments and set up optimizer bookkeeping.

    See the class docstring for the meaning of each argument.

    Raises:
        AttributeError: in dygraph mode when ``parameters`` is None.
        TypeError: for an invalid ``learning_rate`` or ``grad_clip``.
    """
    self._parameter_list = list(
        parameters) if parameters is not None else None
    self._name = name
    if framework.in_dygraph_mode():
        if self._parameter_list is None:
            raise AttributeError(
                "parameters argument given to the Optimizer should not be None in dygraph mode."
            )
        if weight_decay is not None:
            for param in self._parameter_list:
                # Per-parameter regularizers take precedence over the
                # optimizer-level weight_decay; warn once and stop looking.
                if param.regularizer is not None:
                    logging.info(
                        "If regularizer of a Parameter has been set by 'paddle.ParamAttr' or 'static.WeightNormParamAttr' already. "
                        "The weight_decay[%s] in Optimizer will not take effect, and it will only be applied to other Parameters!"
                        % weight_decay.__str__())
                    break
    if not isinstance(learning_rate, (float, LRScheduler)):
        raise TypeError(
            "learning rate should be float or LRScheduler, got %s here" %
            type(learning_rate))
    if grad_clip is not None:
        if not isinstance(grad_clip, GradientClipBase):
            raise TypeError(
                "'grad_clip' should be an instance of GradientClipBase's derived class"
            )
    # A bare float weight_decay is shorthand for L2 regularization.
    if isinstance(weight_decay, float):
        from ..fluid.regularizer import L2Decay
        self.regularization = L2Decay(weight_decay)
    else:
        self.regularization = weight_decay
    self._grad_clip = grad_clip
    self._learning_rate = learning_rate

    self._dtype = None
    # Infer the dtype form parameter
    if self._parameter_list:
        self._dtype = self._parameter_list[0].dtype

    # each program should have a independent learning rate
    # program -> tensor(learning_rate)
    self._learning_rate_map = dict()
    # Dictionary of accumulators. Some optimizer subclasses need to
    # allocate and manage extra tensors associated with the parameters
    # to train. These tensors are called accumulators.
    # {accum_name : { paramter_name : accumulator_for_parameter, ...}, ...}
    self._accumulators = defaultdict(lambda: dict())
    self.helper = None
    self._opti_name_list = []
    self._accumulators_holder = {}
    self._param_device_map = dict()
    # Backward-compat alias: clear_gradients was the pre-2.0 name.
    self.clear_gradients = self.clear_grad
@framework.dygraph_only
def state_dict(self):
    '''
    Get state dict information from optimizer. It contain all the tensor used by optimizer. For Adam optimizer, contains beta1, beta2, momentum etc. If LRScheduler have been used, global_step will be include in state dict.
    If the optimizer never be called(minimize function), the state_dict is empty.

    Args:
        None

    Returns:
        state_dict(dict) : dict contains all the Tensor used by optimizer

    Examples:
        .. code-block:: python

            import paddle
            emb = paddle.nn.Embedding(10, 10)

            adam = paddle.optimizer.Adam(0.001, parameters=emb.parameters())
            state_dict = adam.state_dict()

    '''
    state_dict = {}
    # Accumulators are keyed by their (unique) variable names so the
    # dict can be saved/loaded without knowing the optimizer internals.
    for k, v in self._accumulators.items():
        for para_name, var_tmp in v.items():
            state_dict[var_tmp.name] = var_tmp
    # global step if use lr decay
    if isinstance(self._learning_rate, LRScheduler):
        state_dict["LR_Scheduler"] = self._learning_rate.state_dict()
    return state_dict
@framework.dygraph_only
def set_state_dict(self, state_dict):
    '''
    Load optimizer state dict. For Adam optimizer, contains beta1, beta2, momentum etc. If LRScheduler have been used, global_step will be changed.

    Args:
        state_dict(dict) : Dict contains all the Tensor needed by optimizer

    Return:
        None

    Examples:
        .. code-block:: python

            import paddle
            emb = paddle.nn.Embedding(10, 10)

            layer_state_dict = emb.state_dict()
            paddle.save(layer_state_dict, "emb.pdparams")

            scheduler = paddle.optimizer.lr.NoamDecay(
                d_model=0.01, warmup_steps=100, verbose=True)
            adam = paddle.optimizer.Adam(
                learning_rate=scheduler,
                parameters=emb.parameters())
            opt_state_dict = adam.state_dict()
            paddle.save(opt_state_dict, "adam.pdopt")

            opti_state_dict = paddle.load("adam.pdopt")
            adam.set_state_dict(opti_state_dict)

    '''
    # FIX: restore the LR scheduler state exactly once.  The original code
    # called both set_dict() (a deprecated alias) and set_state_dict(),
    # restoring the same state twice.
    if isinstance(self._learning_rate, LRScheduler):
        self._learning_rate.set_state_dict(state_dict["LR_Scheduler"])

    # Keep the raw dict around so accumulators created lazily later (in
    # _add_accumulator) can also pick up their persisted values.
    self._accumulators_holder = state_dict
    for k, v in self._accumulators.items():
        for para_name, var_tmp in v.items():
            assert var_tmp.name in state_dict, \
                    "optimizer Tensor {} not found".format( var_tmp.name )
            var = var_tmp.value()
            tensor = var.get_tensor()
            model_np = np.array(tensor)

            load_para = state_dict[var_tmp.name]

            if isinstance(load_para, Variable):
                load_para_np = load_para.numpy()
            elif isinstance(load_para, core.VarBase):
                load_para_np = load_para.numpy()
            elif isinstance(load_para, np.ndarray):
                load_para_np = load_para
            else:
                raise RuntimeError("State dict type {} not supported".format(
                    str(type(load_para))))

            # FIX: the original messages referenced an undefined name
            # `item`, raising NameError instead of the intended
            # AssertionError; use var_tmp (the tensor being restored).
            assert model_np.shape == load_para_np.shape, \
                "Parameter shape not match, Dygraph Parameter [ {} ] need tensor with shape {} but load tensor with shape {}".format(
                    var_tmp.name, model_np.shape, load_para_np.shape)
            assert model_np.dtype == load_para_np.dtype, \
                "Parameter dtype not match, Dygraph Parameter [ {} ] need tensor with dtype {} but load tensor with dtype {}".format(
                    var_tmp.name, model_np.dtype, load_para_np.dtype)

            tensor.set(load_para_np, framework._current_expected_place())
def get_opti_var_name_list(self):
    """Return the names of all variables this optimizer has created
    (accumulators, learning-rate variables, ...)."""
    return self._opti_name_list
def _create_global_learning_rate(self):
    """Materialize the global learning-rate variable for the current program.

    For an LRScheduler, a persistable scalar variable is created once and
    registered on the main program so executors can feed the scheduled
    value; for a plain float, a global variable holding that constant is
    created.  Idempotent: nothing happens if the variable already exists.
    """
    if isinstance(self._learning_rate, LRScheduler):
        lr_var = self._global_learning_rate()
        # only create global lr_var once
        if not isinstance(lr_var, framework.Variable):
            lr_name = unique_name.generate('learning_rate')
            self._learning_rate._var_name = lr_name
            lr_var = self.helper.create_global_variable(
                name=lr_name,
                shape=[1],
                persistable=True,
                stop_gradient=True,
                dtype=paddle.get_default_dtype()
                if self._dtype is None else self._dtype)
            main_prog = framework.default_main_program()
            # NOTE(review): 'lr_sheduler' (sic) is presumably the exact
            # attribute name read elsewhere in the framework — verify
            # before "fixing" the spelling.
            main_prog.lr_sheduler = self._learning_rate
            main_prog.lr_var = lr_var

            self._learning_rate_map[framework.default_main_program(
            )] = lr_var

        lr_value = float(self._learning_rate())
        self.helper.set_variable_initializer(
            lr_var, initializer=Constant(value=lr_value))
    elif isinstance(self._learning_rate, float):
        # only create global lr_var once
        lr = self._global_learning_rate()
        if isinstance(lr, framework.Variable):
            return
        else:
            self._learning_rate_map[framework.default_main_program(
            )] = layers.create_global_var(
                name=unique_name.generate("learning_rate"),
                shape=[1],
                value=float(self._learning_rate),
                dtype=paddle.get_default_dtype()
                if self._dtype is None else self._dtype,
                persistable=True)
@framework.dygraph_only
def set_lr(self, value):
    """
    :api_attr: imperative

    Set the value of the learning rate manually in the optimizer. If the optimizer use LRScheduler,
    this API cannot be invoked, because it will lead to conflict.

    Args:
        value (float): the value of learning rate

    Returns:
        None

    Examples:
        .. code-block:: python

            import paddle
            linear = paddle.nn.Linear(10, 10)

            adam = paddle.optimizer.Adam(0.1, parameters=linear.parameters())

            # set learning rate manually by python float value
            lr_list = [0.2, 0.3, 0.4, 0.5, 0.6]
            for i in range(5):
                adam.set_lr(lr_list[i])
                lr = adam.get_lr()
                print("current lr is {}".format(lr))
            # Print:
            #    current lr is 0.2
            #    current lr is 0.3
            #    current lr is 0.4
            #    current lr is 0.5
            #    current lr is 0.6

    """
    if not isinstance(value, (int, float)):
        raise TypeError(
            "The type of 'value' in optimizer.set_lr must be float, but received %s."
            % (type(value)))
    if isinstance(self._learning_rate, LRScheduler):
        raise RuntimeError(
            "optimizer's learning rate can't be LRScheduler when invoke this API, because this will lead to conflict."
        )
    self._learning_rate = float(value)
    current_lr = self._global_learning_rate()
    if current_lr is not None:
        # A learning-rate variable already exists in the program: overwrite
        # its value in place with a fill_constant op so running graphs see
        # the new rate.
        global_block = framework.default_main_program().global_block()
        global_block.append_op(
            type='fill_constant',
            outputs={'Out': [current_lr]},
            attrs={
                'dtype': current_lr.dtype,
                'shape': list(current_lr.shape),
                'value': float(value)
            },
            stop_gradient=True)
def get_lr(self):
    """Return the learning rate currently in effect.

    When the optimizer was built with a plain float, that constant is
    returned unchanged; when it was built with an ``LRScheduler``, the
    scheduler is called and its current (scheduled) value is returned.

    Returns:
        float: The current learning rate of optimizer.
    """
    lr = self._learning_rate
    return lr if isinstance(lr, float) else lr()
def _global_learning_rate(self, program=None):
"""
get global decayed learning rate
:return:
"""
if program is None:
program = framework.default_main_program()
return self._learning_rate_map.get(program, None)
def _append_optimize_op(self, block, param_and_grad):
""" append optimize operator to block and return all the added optimize_op
"""
raise NotImplementedError(
"Class \"Optimizer\" connot be used directly as an optimizer, please use its subclasses such as \"Adam\""
)
def _create_param_lr(self, param_and_grad):
    """Return the effective learning-rate variable for one parameter.

    A parameter's ``optimize_attr['learning_rate']`` acts as a multiplier
    on the global learning rate; 1.0 means "use the global rate directly",
    and a Variable is used as-is.
    """
    # create learning rate tensor for every parameter
    param = param_and_grad[0]
    param_lr = param.optimize_attr['learning_rate']
    if type(param_lr) == Variable:
        return param_lr
    else:
        if param_lr == 1.0:
            return self._global_learning_rate()
        else:
            with default_main_program()._lr_schedule_guard(
                    is_with_opt=True), framework.name_scope(
                    'scale_with_param_lr'):
                return self._global_learning_rate() * param_lr
def _create_accumulators(self, block, parameters):
"""Create all accumulators needed by the parameters
Args:
block: the block in which the loss tensor is present
parameters: list of parameter tensors for the optimizer
"""
pass
def _finish_update(self, block, parameters_and_grads):
"""Finish any custom updates needed
before completing an optimization step
Args:
block: the block in which the loss tensor is present
parameters: list of parameter tensors for the optimizer
Returns:
None
"""
pass
def _add_accumulator(self,
                     name,
                     param,
                     dtype=None,
                     fill_value=0.0,
                     shape=None,
                     type=None,
                     device=None):
    """Create (once) an accumulator variable associated with *param*.

    Args:
        name: name of the accumulator.
        param: parameter tensor for which the accumulator is to be added.
        dtype: data type of the accumulator tensor (defaults to param's).
        fill_value: value used to initialize the accumulator tensor.
        shape: shape of the accumulator (defaults to param's shape).
        type: variable type (defaults to param's type).
        device: device for the initializer (defaults to the device
            recorded for the parameter).

    Returns:
        The accumulator variable.

    Raises:
        Exception: in static mode, when the accumulator already exists
            (in dygraph mode the existing one is returned instead).
    """
    if self._name is not None:
        name = self._name + "_" + name
    if (name in self._accumulators and
            param.name in self._accumulators[name]):
        if framework.in_dygraph_mode():
            return self._accumulators[name][param.name]
        raise Exception("Accumulator {} already exists for parameter {}".
                        format(name, param.name))
    if shape is None:  # FIX: compare to None with `is`, not `==` (PEP 8)
        shape = param.shape
    assert isinstance(self.helper, LayerHelper)

    var_name = param.name + "_" + name
    var_name = unique_name.generate(var_name)
    self._opti_name_list.append(var_name)

    var = self.helper.create_global_variable(
        name=var_name,
        persistable=True,
        dtype=dtype or param.dtype,
        type=param.type if type is None else type,
        shape=shape,
        belong_to_optimizer=True)
    if device is None:
        device = self._get_device_for_param(param.name)
    with device_guard(device):
        self.helper.set_variable_initializer(
            var, initializer=Constant(value=float(fill_value)))

    if framework.in_dygraph_mode():
        # When restoring from a state dict, accumulators created lazily
        # pick up their persisted values here.
        if len(self._accumulators_holder) > 0:
            assert var_name in self._accumulators_holder, \
                    "Optimizer set error, {} should in state dict".format( var_name )
            var.set_value(self._accumulators_holder[var_name])

    self._accumulators[name][param.name] = var
    return var
def _get_accumulator(self, name, param):
"""Utility function to fetch an accumulator for a parameter
Args:
name: name of the accumulator
param: parameter tensor for which accumulator is to be fetched
Returns:
accumulator tensor for the parameter
"""
if self._name is not None:
name = self._name + "_" + name
if (name not in self._accumulators or
param.name not in self._accumulators[name]):
raise Exception("Accumulator {} does not exist for parameter {}".
format(name, param.name))
return self._accumulators[name][param.name]
def _update_param_device_map(self, parameters_and_grads, target_block):
    """Record, for each trainable parameter, the device attribute of the
    first op in *target_block* that consumes it.

    The resulting map is later used by _get_device_for_param() to place
    the optimizer ops on the same device as the forward computation.
    """
    for param_and_grad in parameters_and_grads:
        if param_and_grad[0].stop_gradient is False:
            param_name = param_and_grad[0].name
            ops = target_block.ops
            device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName(
            )
            for op in ops:
                input_arg_names = op.input_arg_names
                if param_name in input_arg_names:
                    self._param_device_map[param_name] = op.attr(
                        device_attr_name)
                    break
def _get_device_for_param(self, param_name):
device = None
if param_name in self._param_device_map:
device = self._param_device_map[param_name]
return device
def _create_optimization_pass(self, parameters_and_grads):
    """Add optimization operators to update gradients to tensors.

    Args:
      parameters_and_grads(list(tuple(Tensor, Tensor))):
        a list of (tensor, gradient) pair to update.

    Returns:
      return_op_list: a list of operators that will complete one step of
        optimization. This will include parameter update ops, global step
        update ops and any other custom ops required by subclasses to manage
        their internal state.
    """
    # This is a default implementation of create_optimization_pass that
    # can be shared by most optimizers. This implementation assumes that
    # the subclass will implement the _append_optimize_op method and the
    # _initialize_tensors method. The subclass can extend the
    # _create_accumulators method if it needs to create accumulators
    # for parameters and extend _finish_update method to add custom ops.

    # Allways called under program_guard use global block as loss block
    # But if current block is in control flow, append optimize op in the
    # grad block of current block

    global_block = framework.default_main_program().global_block()
    target_block = global_block
    current_block = framework.default_main_program().current_block()
    if current_block.idx != global_block.idx:
        assert current_block.backward_block_idx != -1, \
            "current block is not global_block, but it doesn't have backward block."
        target_block = framework.default_main_program().blocks[
            current_block.backward_block_idx]

    start = len(target_block.ops)
    self.helper = LayerHelper(self.__class__.__name__)

    self._update_param_device_map(parameters_and_grads, target_block)
    # Skip parameters with stop_gradient set: they have no gradient and
    # need no accumulators.
    self._create_accumulators(
        target_block,
        [p[0] for p in parameters_and_grads if not p[0].stop_gradient])
    self._create_global_learning_rate()

    if framework.in_dygraph_mode():
        for param_and_grad in parameters_and_grads:
            if param_and_grad[1] is None:
                continue
            if param_and_grad[0].stop_gradient is False:
                self._append_optimize_op(target_block, param_and_grad)
    else:
        for param_and_grad in parameters_and_grads:
            if param_and_grad[1] is None:
                continue
            with param_and_grad[0].block.program._optimized_guard(
                    param_and_grad), name_scope("optimizer"):
                if param_and_grad[0].stop_gradient is False:
                    # Place the update op on the same device as the ops
                    # that consume the parameter.
                    device = self._get_device_for_param(param_and_grad[0]
                                                        .name)
                    with device_guard(device):
                        optimize_op = self._append_optimize_op(
                            target_block, param_and_grad)

    # Get custom finish ops for subclasses
    # FIXME: Need to fix this once we figure out how to handle dependencies
    self._finish_update(target_block, parameters_and_grads)

    end = len(target_block.ops)
    return target_block._slice_ops(start, end)
def _append_dgc_ops(self, param_and_grad):
pass
def backward(self,
             loss,
             startup_program=None,
             parameters=None,
             no_grad_set=None,
             callbacks=None):
    """
    The first part of ``minimize``, do auto-diff to append backward operations for
    the current program.

    Args:
        loss (Tensor): ``loss`` tensor to run optimizations.
        startup_program (Program, optional): :ref:`api_fluid_Program` for
            initializing parameters in ``parameters``. The default value
            is None, at this time :ref:`api_fluid_default_startup_program` will be used.
        parameters (list, optional): List of ``Tensor`` or ``Tensor.name`` to update
            to minimize ``loss``. The default value is None, at this time all parameters
            will be updated.
        no_grad_set (set, optional): Set of ``Tensor``  or ``Tensor.name`` that don't need
            to be updated. The default value is None.
        callbacks (list, optional): list of callable objects to run when appending backward
            operator for one parameter. The default value is None.

    Return:
        list: list of (param, grad) tensor pairs, param is ``Parameter``,
            grad is the gradient value corresponding to the parameter.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np
            value = np.arange(26).reshape(2, 13).astype("float32")
            a = paddle.to_tensor(value)
            linear = paddle.nn.Linear(13, 5)
            # This can be any optimizer supported by dygraph.
            adam = paddle.optimizer.Adam(learning_rate = 0.01,
                                        parameters = linear.parameters())
            out = linear(a)
            out.backward()
            adam.step()
            adam.clear_grad()
    """
    act_no_grad_set = None
    if framework.in_dygraph_mode():
        # Dygraph autograd has already produced the gradients; no_grad_set
        # is only relevant for static-graph append_backward below.
        pass
    else:
        act_no_grad_set = self._get_no_grad_set(loss, no_grad_set)

    # Infer dtype by loss if None
    if self._dtype is None:
        self._dtype = loss.dtype

    if framework.in_dygraph_mode():
        parameter_list = parameters if parameters \
            else self._parameter_list
        params_grads = []
        for param in parameter_list:
            if param.stop_gradient:
                continue
            if param._grad_ivar() is not None:
                # create gradient tensor
                grad_var = param._grad_ivar()
                params_grads.append((param, grad_var))
    else:
        if callbacks is None:
            callbacks = [error_clip_callback]
        else:
            assert (isinstance(callbacks, list))
        program = loss.block.program
        assert len(loss.shape) == 1 and loss.shape[0] == 1, \
            "The loss.shape should be (1L,), but the current loss.shape is {}. " \
            "Maybe that  you should call paddle.mean to process the current loss.".format(
                loss.shape)
        parameter_list = parameters if parameters \
            else self._parameter_list
        with program_guard(program, startup_program):
            params_grads = append_backward(loss, parameter_list,
                                           act_no_grad_set, callbacks)
            # Note: since we can't use all_reduce_op now,
            #  dgc_op should be the last op of one grad.
            self._append_dgc_ops(params_grads)
    return params_grads
def apply_gradients(self, params_grads):
    """
    Second part of `minimize`, appending optimization operators for
    given `params_grads` pairs.

    Args:
        params_grads (list): list of (param, grad) pair to do optimization.

    Returns:
        list: A list of operators appended to the current program.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
            linear = paddle.nn.Linear(10, 10)
            inp = paddle.to_tensor(inp)
            out = linear(inp)
            loss = paddle.mean(out)
            optimizer = paddle.optimizer.Adam(learning_rate=0.1,
                    parameters=linear.parameters())
            params_grads = optimizer.backward(loss)
            optimizer.apply_gradients(params_grads)
    """
    # Sort by parameter name so the op insertion order is deterministic.
    params_grads = sorted(params_grads, key=lambda x: x[0].name)

    # Clipping: an explicit 'optimizer(grad_clip)' takes precedence over a
    # program-level 'set_gradient_clip'.
    if self._grad_clip is not None:
        params_grads = self._grad_clip(params_grads)
    else:
        params_grads = append_gradient_clip_ops(params_grads)

    # Add regularization if any
    params_grads = append_regularization_ops(params_grads,
                                             self.regularization)

    optimize_ops = self._create_optimization_pass(params_grads)
    return optimize_ops
def _apply_optimize(self, loss, startup_program, params_grads):
    """
    Second part of `minimize`, appending optimization operators for
    given `params_grads` pairs.

    Args:
        loss (Tensor): loss tensor to run optimizations.
        startup_program (Program): startup_program for initializing parameters
            in `parameters`.
        params_grads (list): list of (param, grad) pair to do optimization.

    Returns:
        list: A list of operators appended to the current program.
    """
    if framework.in_dygraph_mode():
        # Dygraph: clip, regularize and optimize eagerly within the
        # default programs (loss/startup_program are not used here).
        with program_guard(framework.default_main_program(),
                           framework.default_startup_program()):
            if self._grad_clip is not None:
                params_grads = self._grad_clip(params_grads)
            params_grads = append_regularization_ops(params_grads,
                                                     self.regularization)
            optimize_ops = self._create_optimization_pass(params_grads)
    else:
        # Static graph: delegate to apply_gradients inside the loss program.
        program = loss.block.program
        with program_guard(program, startup_program):
            optimize_ops = self.apply_gradients(params_grads)
    return optimize_ops
def _get_no_grad_set(self, loss, no_grad_set=None):
    """Return the user-supplied no-grad names merged with every
    non-trainable parameter of the loss program.

    Args:
        loss (Tensor): loss whose program supplies the parameter list.
        no_grad_set (set, optional): names/tensors the caller excludes.

    Returns:
        set: parameter names that must not receive gradients.
    """
    result = _get_no_grad_set_name(no_grad_set)
    all_params = loss.block.program.global_block().all_parameters()
    # A non-trainable parameter should never carry a gradient.
    frozen_names = {p.name for p in all_params if p.stop_gradient is True}
    result.update(frozen_names)
    return result
@framework.dygraph_only
def clear_grad(self):
    """
    Clear the gradients of all optimized parameters for model.
    If not, new gradient will accumulate on previous gradient.

    Returns:
        None

    Examples:
        .. code-block:: python

            import numpy as np
            import paddle

            value = np.arange(26).reshape(2, 13).astype("float32")
            a = paddle.to_tensor(value)
            linear = paddle.nn.Linear(13, 5)
            # This can be any optimizer supported by dygraph.
            adam = paddle.optimizer.Adam(learning_rate = 0.01,
                                        parameters = linear.parameters())
            out = linear(a)
            out.backward()
            adam.step()
            adam.clear_grad()
    """
    for p in self._parameter_list:
        # Only trainable parameters carry gradients worth clearing.
        if not p.stop_gradient:
            p.clear_gradient()
@imperative_base.no_grad
def minimize(self,
             loss,
             startup_program=None,
             parameters=None,
             no_grad_set=None):
    """
    Add operations to minimize ``loss`` by updating ``parameters``.

    Args:
        loss (Tensor): A ``Tensor`` containing the value to minimize.
        startup_program (Program, optional): :ref:`api_fluid_Program` for
            initializing parameters in ``parameters``. The default value
            is None, at this time :ref:`api_fluid_default_startup_program` will be used.
        parameters (list, optional): List of ``Tensor`` or ``Tensor.name`` to update
            to minimize ``loss``. The default value is None, at this time all parameters
            will be updated.
        no_grad_set (set, optional): Set of ``Tensor`` or ``Tensor.name`` that don't need
            to be updated. The default value is None.

    Returns:
        tuple: tuple (optimize_ops, params_grads), A list of operators appended
            by minimize and a list of (param, grad) tensor pairs, param is
            ``Parameter``, grad is the gradient value corresponding to the parameter.
            In static graph mode, the returned tuple can be passed to ``fetch_list`` in ``Executor.run()`` to
            indicate program pruning. If so, the program will be pruned by ``feed`` and
            ``fetch_list`` before run, see details in ``Executor``.

    Examples:
        .. code-block:: python

            import paddle
            linear = paddle.nn.Linear(10, 10)
            input = paddle.uniform(shape=[10, 10], min=-0.1, max=0.1)
            out = linear(input)
            loss = paddle.mean(out)

            beta1 = paddle.to_tensor([0.9], dtype="float32")
            beta2 = paddle.to_tensor([0.99], dtype="float32")

            adam = paddle.optimizer.Adam(learning_rate=0.1,
                    parameters=linear.parameters(),
                    weight_decay=0.01)
            out.backward()
            adam.minimize(loss)
            adam.clear_grad()
    """
    assert isinstance(loss, Variable), "The loss should be an Tensor."

    parameter_list = parameters if parameters \
        else self._parameter_list

    # Phase 1: auto-diff — collect (param, grad) pairs.
    params_grads = self.backward(
        loss,
        startup_program=startup_program,
        parameters=parameter_list,
        no_grad_set=no_grad_set)

    # Phase 2: append/execute the optimization operators.
    optimize_ops = self._apply_optimize(
        loss, startup_program=startup_program, params_grads=params_grads)

    return optimize_ops, params_grads
@imperative_base.no_grad
@framework.dygraph_only
def step(self):
    """
    Execute the optimizer and update parameters once.

    Returns:
        None

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            value = np.arange(26).reshape(2, 13).astype("float32")
            a = paddle.to_tensor(value)
            linear = paddle.nn.Linear(13, 5)
            # This can be any optimizer supported by dygraph.
            adam = paddle.optimizer.Adam(learning_rate = 0.01,
                                        parameters = linear.parameters())
            out = linear(a)
            out.backward()
            adam.step()
            adam.clear_grad()
    """
    # Gather every trainable parameter that actually received a gradient
    # during the preceding backward pass.
    pairs = []
    for param in self._parameter_list:
        if param.stop_gradient:
            continue
        if param._grad_ivar() is not None:
            pairs.append((param, param._grad_ivar()))

    self._apply_optimize(
        loss=None, startup_program=None, params_grads=pairs)
|
teran/bootloader-web | refs/heads/master | examples/scripts/delete-deployments.py | 1 | #!/usr/bin/env python
import os
import requests
import sys

# Credentials and endpoint come exclusively from the environment; abort
# with usage help if either variable is missing.
try:
    API_TOKEN = os.environ['API_TOKEN']
    BOOTLOADER_URL = os.environ['BOOTLOADER_URL']
except KeyError as e:
    print("""
API_TOKEN and BOOTLOADER_URL must be passed as environment variables
Example:
export API_TOKEN=<token>
export BOOTLOADER_URL=<bootloaderurl>
%s
""" % __file__)
    sys.exit(1)

# Token-based auth header sent with every request.
headers = {
    'User-Agent': 'Bootloader Delete Deployment example script/0.1',
    'Authorization': 'Token %s' % (API_TOKEN,),
}

# List all deployments, then delete each one by id.
# NOTE(review): assumes the endpoint returns a JSON list and that every
# item has an 'id' key — confirm against the v1alpha2 API.
r = requests.get(
    '%s/api/v1alpha2/deployments/' % BOOTLOADER_URL,
    headers=headers).json()

for deployment in r:
    requests.delete(
        '%s/api/v1alpha2/deployments/%s/' % (
            BOOTLOADER_URL, deployment['id'],),
        headers=headers)
|
cswiercz/sympy | refs/heads/master | sympy/strategies/tests/test_tree.py | 59 | from sympy.strategies.tree import treeapply, greedy, allresults, brute
from sympy.core.compatibility import reduce
from functools import partial
def test_treeapply():
    """treeapply folds a nested list/tuple tree with per-type join functions."""
    nested = ([3, 3], [4, 1], 2)
    # min over lists, max over tuples: max(min(3, 3), min(4, 1), 2) == 3
    assert treeapply(nested, {list: min, tuple: max}) == 3

    def total(*args):
        return sum(args)

    def product(*args):
        return reduce(lambda acc, value: acc * value, args, 1)

    # sum over lists, product over tuples: 6 * 5 * 2 == 60
    assert treeapply(nested, {list: total, tuple: product}) == 60
def test_treeapply_leaf():
    """The leaf= callback transforms leaves before the tree is folded."""
    assert treeapply(3, {}, leaf=lambda x: x**2) == 9

    tree = ([3, 3], [4, 1], 2)
    shifted = ([4, 4], [5, 2], 3)
    # Incrementing each leaf first is equivalent to folding the pre-shifted tree.
    assert treeapply(tree, {list: min, tuple: max}, leaf=lambda x: x + 1) == \
        treeapply(shifted, {list: min, tuple: max})
def test_treeapply_strategies():
    """treeapply composes strategies: lists chain, tuples minimize/maximize."""
    from sympy.strategies import chain, minimize
    join = {list: chain, tuple: minimize}

    inc = lambda x: x + 1
    dec = lambda x: x - 1
    double = lambda x: 2*x

    # A bare strategy (leaf) is returned unchanged.
    assert treeapply(inc, join) == inc
    assert treeapply((inc, dec), join)(5) == minimize(inc, dec)(5)
    assert treeapply([inc, dec], join)(5) == chain(inc, dec)(5)
    tree = (inc, [dec, double])  # either inc or dec-then-double
    assert treeapply(tree, join)(5) == 6
    assert treeapply(tree, join)(1) == 0

    # Flip the objective so the tuple join picks the largest result instead.
    maximize = partial(minimize, objective=lambda x: -x)
    join = {list: chain, tuple: maximize}
    fn = treeapply(tree, join)
    assert fn(4) == 6  # highest value comes from the dec then double
    assert fn(1) == 2  # highest value comes from the inc
def test_greedy():
    """greedy picks, at each branch point, the outcome best satisfying the objective."""
    inc = lambda x: x + 1
    dec = lambda x: x - 1
    double = lambda x: 2*x

    choices = [inc, (dec, double)]  # either inc or dec-then-double
    best = greedy(choices, objective=lambda x: -x)
    assert best(4) == 6  # dec-then-double wins
    assert best(1) == 2  # plain inc wins

    deep = [inc, dec, [inc, dec, [(inc, inc), (dec, dec)]]]
    # Default objective minimizes; the negated objective maximizes.
    assert greedy(deep)(10) == 8
    assert greedy(deep, objective=lambda x: -x)(10) == 12
def test_allresults():
    """allresults enumerates every outcome reachable through the strategy tree."""
    inc = lambda x: x + 1
    dec = lambda x: x - 1
    double = lambda x: x*2
    square = lambda x: x**2

    assert set(allresults(inc)(3)) == {inc(3)}
    # A list branches: both alternatives appear in the results.
    assert set(allresults([inc, dec])(3)) == {2, 4}
    # A tuple composes sequentially: inc then dec is the identity here.
    assert set(allresults((inc, dec))(3)) == {3}
    assert set(allresults([inc, (dec, double)])(4)) == {5, 6}
def test_brute():
    """brute tries every path and keeps the one optimizing the objective."""
    inc = lambda x: x + 1
    dec = lambda x: x - 1
    square = lambda x: x**2

    strategy = brute(([inc, dec], square), lambda x: -x)
    assert strategy(2) == (2 + 1)**2    # inc branch maximizes the square
    assert strategy(-2) == (-2 - 1)**2  # dec branch maximizes the square

    # Without an objective, a bare strategy is simply applied.
    assert brute(inc)(1) == 2
|
kingvuplus/xx-v | refs/heads/master | tools/create_picon_links.py | 192 | #
# create links for picon
# usage: create_picon_links lamedb
# run in picon directory.
# It will read the servicenames from the lamedb and create symlinks
# for the servicereference names.
import os, sys

# Python 2 script. Read the lamedb given on the command line and keep only
# the services section (skip the header and the trailing footer lines).
f = open(sys.argv[1]).readlines()
f = f[f.index("services\n")+1:-3]

# Each service occupies three consecutive lines: reference, name, flags.
while len(f):
    # The reference line is colon-separated hexadecimal fields.
    ref = [int(x, 0x10) for x in f[0][:-1].split(':')]
    name = f[1][:-1]
    # Strip enigma name-markup control bytes.
    name = name.replace('\xc2\x87', '').replace('\xc2\x86', '')
    # SID:NS:TSID:ONID:STYPE:UNUSED(channelnumber in enigma1)
    # X X X X D D
    # REFTYPE:FLAGS:STYPE:SID:TSID:ONID:NS:PARENT_SID:PARENT_TSID:UNUSED
    # D D X X X X X X X X
    refstr = "1:0:%X:%X:%X:%X:%X:0:0:0" % (ref[4], ref[0], ref[2], ref[3], ref[1])
    refstr = refstr.replace(':', '_')
    filename = name + ".png"
    linkname = refstr + ".png"
    # Sanitise characters that are unsafe or awkward in file names.
    filename = filename.replace('/', '_').replace('\\', '_').replace('&', '_').replace('\'', '').replace('"', '').replace('`', '')
    filename = filename.replace('\n', '')
    # Replace any non-ASCII byte with an underscore.
    for i in range(len(filename)):
        if ord(filename[i]) > 127:
            filename = filename[0:i] + '_' + filename[i + 1:]
    if os.access(filename, os.F_OK) and not os.access(linkname, os.F_OK):
        os.symlink(filename, linkname)
    else:
        # NOTE(review): this branch also fires when the link already exists,
        # in which case "could not find" is misleading — confirm intent.
        print "could not find %s (%s)" % (filename, name)
    # Advance to the next three-line service record.
    f = f[3:]
|
stwunsch/gnuradio | refs/heads/master | gr-blocks/python/blocks/qa_throttle.py | 57 | #!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, blocks
class test_throttle(gr_unittest.TestCase):
    """Smoke test for the blocks.throttle block."""

    def setUp(self):
        # Fresh flow graph for every test case.
        self.tb = gr.top_block()

    def tearDown(self):
        # Drop the flow graph so its resources are released between tests.
        self.tb = None

    def test_01(self):
        # Test that we can make the block
        op = blocks.throttle(gr.sizeof_gr_complex, 1)
if __name__ == '__main__':
    # Run the suite and emit an XML result file when invoked directly.
    gr_unittest.run(test_throttle, "test_throttle.xml")
|
jorik041/glances | refs/heads/master | glances/core/glances_client.py | 12 | # -*- coding: utf-8 -*-
#
# This file is part of Glances.
#
# Copyright (C) 2015 Nicolargo <nicolas@nicolargo.com>
#
# Glances is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Glances is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Manage the Glances client."""
# Import system libs
import json
import socket
import sys
try:
from xmlrpc.client import Transport, ServerProxy, ProtocolError, Fault
except ImportError:
# Python 2
from xmlrpclib import Transport, ServerProxy, ProtocolError, Fault
# Import Glances libs
from glances.core.glances_globals import version
from glances.core.glances_logging import logger
from glances.core.glances_stats import GlancesStatsClient
from glances.outputs.glances_curses import GlancesCursesClient
class GlancesClientTransport(Transport):

    """Overwrite the default XML-RPC transport in order to manage a timeout."""

    def set_timeout(self, timeout):
        # xmlrpc's Transport reads self.timeout when it opens the HTTP
        # connection, so storing it here applies to every request.
        self.timeout = timeout
class GlancesClient(object):

    """This class creates and manages the TCP client.

    It talks XML-RPC to a Glances server and transparently falls back to
    SNMP when no server answers (or when --snmp-force is set).
    """

    def __init__(self, config=None, args=None, timeout=7, return_to_browser=False):
        """Init the client with the command line args/config.

        timeout is the XML-RPC transport timeout in seconds;
        return_to_browser keeps the process alive on errors so the
        server browser can be displayed again instead of exiting.
        """
        # Store the arg/config
        self.args = args
        self.config = config

        # Default client mode
        self._client_mode = 'glances'

        # Return to browser or exit
        self.return_to_browser = return_to_browser

        # Build the URI (embed credentials only when a password is set)
        if args.password != "":
            uri = 'http://{0}:{1}@{2}:{3}'.format(args.username, args.password,
                                                  args.client, args.port)
        else:
            uri = 'http://{0}:{1}'.format(args.client, args.port)
        logger.debug("Try to connect to {0}".format(uri))

        # Try to connect to the URI
        transport = GlancesClientTransport()
        # Configure the server timeout
        transport.set_timeout(timeout)
        try:
            self.client = ServerProxy(uri, transport=transport)
        except Exception as e:
            self.log_and_exit("Client couldn't create socket {0}: {1}".format(uri, e))

    def log_and_exit(self, msg=''):
        """Log the message then exit (or just log when returning to the browser)."""
        if not self.return_to_browser:
            logger.critical(msg)
            sys.exit(2)
        else:
            logger.error(msg)

    @property
    def client_mode(self):
        """Get the client mode."""
        return self._client_mode

    @client_mode.setter
    def client_mode(self, mode):
        """Set the client mode.

        - 'glances' = Glances server (default)
        - 'snmp' = SNMP (fallback)
        """
        self._client_mode = mode

    def login(self):
        """Logon to the server.

        Returns True on success, False otherwise (may also exit through
        log_and_exit when not returning to the browser).
        """
        ret = True

        if not self.args.snmp_force:
            # First of all, trying to connect to a Glances server
            client_version = None
            try:
                client_version = self.client.init()
            except socket.error as err:
                # Fallback to SNMP
                self.client_mode = 'snmp'
                logger.error("Connection to Glances server failed: {0}".format(err))
                fallbackmsg = 'No Glances server found. Trying fallback to SNMP...'
                if not self.return_to_browser:
                    print(fallbackmsg)
                else:
                    logger.info(fallbackmsg)
            except ProtocolError as err:
                # Others errors
                if str(err).find(" 401 ") > 0:
                    msg = "Connection to server failed (bad password)"
                else:
                    msg = "Connection to server failed ({0})".format(err)
                self.log_and_exit(msg)
                return False

            if self.client_mode == 'glances':
                # Check that both client and server are in the same major version
                if version.split('.')[0] == client_version.split('.')[0]:
                    # Init stats
                    self.stats = GlancesStatsClient(config=self.config, args=self.args)
                    self.stats.set_plugins(json.loads(self.client.getAllPlugins()))
                    logger.debug("Client version: {0} / Server version: {1}".format(version, client_version))
                else:
                    self.log_and_exit("Client and server not compatible: \
Client version: {0} / Server version: {1}".format(version, client_version))
                    return False

        else:
            self.client_mode = 'snmp'

        # SNMP mode
        if self.client_mode == 'snmp':
            logger.info("Trying to grab stats by SNMP...")
            # Imported lazily so the SNMP stack is only loaded when needed
            from glances.core.glances_stats import GlancesStatsClientSNMP
            # Init stats
            self.stats = GlancesStatsClientSNMP(config=self.config, args=self.args)
            if not self.stats.check_snmp():
                self.log_and_exit("Connection to SNMP server failed")
                return False

        if ret:
            # Load limits from the configuration file
            # Each client can choose its owns limits
            self.stats.load_limits(self.config)

            # Init screen
            self.screen = GlancesCursesClient(args=self.args)

        # Return result
        return ret

    def update(self):
        """Update stats from the Glances/SNMP server and return the status."""
        if self.client_mode == 'glances':
            return self.update_glances()
        elif self.client_mode == 'snmp':
            return self.update_snmp()
        else:
            self.end()
            logger.critical("Unknown server mode: {0}".format(self.client_mode))
            sys.exit(2)

    def update_glances(self):
        """Get stats from Glances server.

        Return the client/server connection status:
        - Connected: Connection OK
        - Disconnected: Connection NOK
        """
        # Update the stats
        try:
            server_stats = json.loads(self.client.getAll())
            server_stats['monitor'] = json.loads(self.client.getAllMonitored())
        except (socket.error, Fault):
            # Client cannot get server stats (Fault: see issue #375)
            return "Disconnected"
        else:
            # Put it in the internal dict
            self.stats.update(server_stats)
            return "Connected"

    def update_snmp(self):
        """Get stats from SNMP server.

        Return the client/server connection status:
        - SNMP: Connection with SNMP server OK
        - Disconnected: Connection NOK
        """
        # Update the stats
        try:
            self.stats.update()
        except Exception:
            # Client cannot get SNMP server stats
            return "Disconnected"
        else:
            # Grab success
            return "SNMP"

    def serve_forever(self):
        """Main client loop: update stats, refresh the screen, export."""
        exitkey = False
        try:
            # BUGFIX: 'while True and not exitkey' — the 'True and' was a
            # tautology; loop simply until the screen reports an exit key.
            while not exitkey:
                # Update the stats
                cs_status = self.update()

                # Update the screen
                exitkey = self.screen.update(self.stats,
                                             cs_status=cs_status,
                                             return_to_browser=self.return_to_browser)

                # Export stats using export modules
                self.stats.export(self.stats)
        except Exception as e:
            logger.critical(e)
            self.end()

        return self.client_mode

    def end(self):
        """End of the client session."""
        self.screen.end()
|
farhi-naz/phantomjs | refs/heads/master | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/bot/queueengine_unittest.py | 120 | # Copyright (c) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
import os
import shutil
import tempfile
import threading
import unittest2 as unittest
from webkitpy.common.system.executive import ScriptError
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.tool.bot.queueengine import QueueEngine, QueueEngineDelegate, TerminateQueue
class LoggingDelegate(QueueEngineDelegate):
    """Test delegate that records the name of every callback the engine invokes."""

    def __init__(self, test):
        self._test = test
        self._callbacks = []  # ordered list of callback names seen so far
        self._run_before = False  # lets should_continue_work_queue return True once
        self.stop_message = None

    # The exact callback sequence a single successful pass should produce.
    expected_callbacks = [
        'queue_log_path',
        'begin_work_queue',
        'should_continue_work_queue',
        'next_work_item',
        'work_item_log_path',
        'process_work_item',
        'should_continue_work_queue',
        'stop_work_queue',
    ]

    def record(self, method_name):
        self._callbacks.append(method_name)

    def queue_log_path(self):
        self.record("queue_log_path")
        return os.path.join(self._test.temp_dir, "queue_log_path")

    def work_item_log_path(self, work_item):
        self.record("work_item_log_path")
        return os.path.join(self._test.temp_dir, "work_log_path", "%s.log" % work_item)

    def begin_work_queue(self):
        self.record("begin_work_queue")

    def should_continue_work_queue(self):
        self.record("should_continue_work_queue")
        # Allow exactly one pass through the engine's main loop.
        if not self._run_before:
            self._run_before = True
            return True
        return False

    def next_work_item(self):
        self.record("next_work_item")
        return "work_item"

    def process_work_item(self, work_item):
        self.record("process_work_item")
        self._test.assertEqual(work_item, "work_item")
        return True

    def handle_unexpected_error(self, work_item, message):
        self.record("handle_unexpected_error")
        self._test.assertEqual(work_item, "work_item")

    def stop_work_queue(self, message):
        self.record("stop_work_queue")
        self.stop_message = message
class RaisingDelegate(LoggingDelegate):
    """A LoggingDelegate whose work-item processing raises a canned exception."""

    def __init__(self, test, exception):
        super(RaisingDelegate, self).__init__(test)
        self._exception = exception

    def process_work_item(self, work_item):
        # Record the callback like the base class would, then fail deliberately.
        self.record("process_work_item")
        raise self._exception
class FastQueueEngine(QueueEngine):
    """QueueEngine variant that never sleeps, so tests run instantly."""

    def __init__(self, delegate):
        QueueEngine.__init__(self, "fast-queue", delegate, threading.Event())

    # No sleep for the wicked.
    seconds_to_sleep = 0

    def _sleep(self, message):
        # Skip the real sleep entirely.
        pass
class QueueEngineTest(unittest.TestCase):
    """Exercise QueueEngine's callback sequencing and error handling."""

    def test_trivial(self):
        # A plain run should invoke exactly the expected callback sequence
        # and create both log files.
        delegate = LoggingDelegate(self)
        self._run_engine(delegate)
        self.assertEqual(delegate.stop_message, "Delegate terminated queue.")
        self.assertEqual(delegate._callbacks, LoggingDelegate.expected_callbacks)
        self.assertTrue(os.path.exists(os.path.join(self.temp_dir, "queue_log_path")))
        self.assertTrue(os.path.exists(os.path.join(self.temp_dir, "work_log_path", "work_item.log")))

    def test_unexpected_error(self):
        delegate = RaisingDelegate(self, ScriptError(exit_code=3))
        self._run_engine(delegate)
        expected_callbacks = LoggingDelegate.expected_callbacks[:]
        work_item_index = expected_callbacks.index('process_work_item')
        # The unexpected error should be handled right after process_work_item starts
        # but before any other callback. Otherwise callbacks should be normal.
        expected_callbacks.insert(work_item_index + 1, 'handle_unexpected_error')
        self.assertEqual(delegate._callbacks, expected_callbacks)

    def test_handled_error(self):
        # A ScriptError carrying the engine's handled_error_code is absorbed
        # without triggering handle_unexpected_error.
        delegate = RaisingDelegate(self, ScriptError(exit_code=QueueEngine.handled_error_code))
        self._run_engine(delegate)
        self.assertEqual(delegate._callbacks, LoggingDelegate.expected_callbacks)

    def _run_engine(self, delegate, engine=None, termination_message=None):
        # Run the (possibly supplied) engine and assert its termination log line.
        if not engine:
            engine = QueueEngine("test-queue", delegate, threading.Event())
        if not termination_message:
            termination_message = "Delegate terminated queue."
        expected_logs = "\n%s\n" % termination_message
        OutputCapture().assert_outputs(self, engine.run, expected_logs=expected_logs)

    def _test_terminating_queue(self, exception, termination_message):
        work_item_index = LoggingDelegate.expected_callbacks.index('process_work_item')

        # The terminating error should be handled right after process_work_item.
        # There should be no other callbacks after stop_work_queue.
        expected_callbacks = LoggingDelegate.expected_callbacks[:work_item_index + 1]
        expected_callbacks.append("stop_work_queue")

        delegate = RaisingDelegate(self, exception)
        self._run_engine(delegate, termination_message=termination_message)

        self.assertEqual(delegate._callbacks, expected_callbacks)
        self.assertEqual(delegate.stop_message, termination_message)

    def test_terminating_error(self):
        self._test_terminating_queue(KeyboardInterrupt(), "User terminated queue.")
        self._test_terminating_queue(TerminateQueue(), "TerminateQueue exception received.")

    def test_now(self):
        """Make sure there are no typos in the QueueEngine.now() method."""
        engine = QueueEngine("test", None, None)
        self.assertIsInstance(engine._now(), datetime.datetime)

    def test_sleep_message(self):
        engine = QueueEngine("test", None, None)
        # Freeze the clock so the formatted wake-up time is deterministic.
        engine._now = lambda: datetime.datetime(2010, 1, 1)
        expected_sleep_message = "MESSAGE Sleeping until 2010-01-01 00:02:00 (120 seconds)."
        self.assertEqual(engine._sleep_message("MESSAGE"), expected_sleep_message)

    def setUp(self):
        self.temp_dir = tempfile.mkdtemp(suffix="work_queue_test_logs")

    def tearDown(self):
        shutil.rmtree(self.temp_dir)
|
jsoref/django | refs/heads/master | django/core/management/commands/migrate.py | 19 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import time
from collections import OrderedDict
from importlib import import_module
from django.apps import apps
from django.core.management.base import BaseCommand, CommandError
from django.core.management.sql import (
emit_post_migrate_signal, emit_pre_migrate_signal,
)
from django.db import DEFAULT_DB_ALIAS, connections, router, transaction
from django.db.migrations.autodetector import MigrationAutodetector
from django.db.migrations.executor import MigrationExecutor
from django.db.migrations.loader import AmbiguityError
from django.db.migrations.state import ProjectState
from django.utils.module_loading import module_has_submodule
class Command(BaseCommand):
help = "Updates database schema. Manages both apps with migrations and those without."
def add_arguments(self, parser):
    """Register the command-line options accepted by ``migrate``."""
    parser.add_argument('app_label', nargs='?',
        help='App label of an application to synchronize the state.')
    parser.add_argument('migration_name', nargs='?',
        help=(
            'Database state will be brought to the state after that '
            'migration. Use the name "zero" to unapply all migrations.'
        ),
    )
    parser.add_argument('--noinput', '--no-input',
        action='store_false', dest='interactive', default=True,
        help='Tells Django to NOT prompt the user for input of any kind.')
    parser.add_argument('--database', action='store', dest='database',
        default=DEFAULT_DB_ALIAS, help='Nominates a database to synchronize. '
        'Defaults to the "default" database.')
    parser.add_argument('--fake', action='store_true', dest='fake', default=False,
        help='Mark migrations as run without actually running them.')
    parser.add_argument('--fake-initial', action='store_true', dest='fake_initial', default=False,
        help='Detect if tables already exist and fake-apply initial migrations if so. Make sure '
        'that the current database schema matches your initial migration before using this '
        'flag. Django will only check for an existing table name.')
    parser.add_argument('--run-syncdb', action='store_true', dest='run_syncdb',
        help='Creates tables for apps without migrations.')
def handle(self, *args, **options):
    """Plan and apply migrations (and optionally syncdb) for one database.

    Resolves the migration targets from the positional arguments, prints
    the plan, emits the pre/post migrate signals, and runs the executor.
    """
    self.verbosity = options.get('verbosity')
    self.interactive = options.get('interactive')

    # Import the 'management' module within each installed app, to register
    # dispatcher events.
    for app_config in apps.get_app_configs():
        if module_has_submodule(app_config.module, "management"):
            import_module('.management', app_config.name)

    # Get the database we're operating from
    db = options.get('database')
    connection = connections[db]

    # Hook for backends needing any database preparation
    connection.prepare_database()
    # Work out which apps have migrations and which do not
    executor = MigrationExecutor(connection, self.migration_progress_callback)

    # Before anything else, see if there's conflicting apps and drop out
    # hard if there are any
    conflicts = executor.loader.detect_conflicts()
    if conflicts:
        name_str = "; ".join(
            "%s in %s" % (", ".join(names), app)
            for app, names in conflicts.items()
        )
        raise CommandError(
            "Conflicting migrations detected; multiple leaf nodes in the "
            "migration graph: (%s).\nTo fix them run "
            "'python manage.py makemigrations --merge'" % name_str
        )

    # If they supplied command line arguments, work out what they mean.
    target_app_labels_only = True
    if options['app_label'] and options['migration_name']:
        # Both app label and migration name given: migrate to that exact point.
        app_label, migration_name = options['app_label'], options['migration_name']
        if app_label not in executor.loader.migrated_apps:
            raise CommandError(
                "App '%s' does not have migrations." % app_label
            )
        if migration_name == "zero":
            # "zero" means unapply everything for this app.
            targets = [(app_label, None)]
        else:
            try:
                migration = executor.loader.get_migration_by_prefix(app_label, migration_name)
            except AmbiguityError:
                raise CommandError(
                    "More than one migration matches '%s' in app '%s'. "
                    "Please be more specific." %
                    (migration_name, app_label)
                )
            except KeyError:
                raise CommandError("Cannot find a migration matching '%s' from app '%s'." % (
                    migration_name, app_label))
            targets = [(app_label, migration.name)]
        target_app_labels_only = False
    elif options['app_label']:
        # Only an app label: migrate that app to its latest leaf node(s).
        app_label = options['app_label']
        if app_label not in executor.loader.migrated_apps:
            raise CommandError(
                "App '%s' does not have migrations." % app_label
            )
        targets = [key for key in executor.loader.graph.leaf_nodes() if key[0] == app_label]
    else:
        # No arguments: migrate everything to the latest state.
        targets = executor.loader.graph.leaf_nodes()

    plan = executor.migration_plan(targets)
    run_syncdb = options.get('run_syncdb') and executor.loader.unmigrated_apps

    # Print some useful info
    if self.verbosity >= 1:
        self.stdout.write(self.style.MIGRATE_HEADING("Operations to perform:"))
        if run_syncdb:
            self.stdout.write(
                self.style.MIGRATE_LABEL(" Synchronize unmigrated apps: ") +
                (", ".join(executor.loader.unmigrated_apps))
            )
        if target_app_labels_only:
            self.stdout.write(
                self.style.MIGRATE_LABEL(" Apply all migrations: ") +
                (", ".join(set(a for a, n in targets)) or "(none)")
            )
        else:
            if targets[0][1] is None:
                self.stdout.write(self.style.MIGRATE_LABEL(
                    " Unapply all migrations: ") + "%s" % (targets[0][0], )
                )
            else:
                self.stdout.write(self.style.MIGRATE_LABEL(
                    " Target specific migration: ") + "%s, from %s"
                    % (targets[0][1], targets[0][0])
                )

    emit_pre_migrate_signal(self.verbosity, self.interactive, connection.alias)

    # Run the syncdb phase.
    if run_syncdb:
        if self.verbosity >= 1:
            self.stdout.write(self.style.MIGRATE_HEADING("Synchronizing apps without migrations:"))
        self.sync_apps(connection, executor.loader.unmigrated_apps)

    # Migrate!
    if self.verbosity >= 1:
        self.stdout.write(self.style.MIGRATE_HEADING("Running migrations:"))
    if not plan:
        executor.check_replacements()
        if self.verbosity >= 1:
            self.stdout.write(" No migrations to apply.")
        # If there's changes that aren't in migrations yet, tell them how to fix it.
        autodetector = MigrationAutodetector(
            executor.loader.project_state(),
            ProjectState.from_apps(apps),
        )
        changes = autodetector.changes(graph=executor.loader.graph)
        if changes:
            self.stdout.write(self.style.NOTICE(
                " Your models have changes that are not yet reflected "
                "in a migration, and so won't be applied."
            ))
            self.stdout.write(self.style.NOTICE(
                " Run 'manage.py makemigrations' to make new "
                "migrations, and then re-run 'manage.py migrate' to "
                "apply them."
            ))
    else:
        fake = options.get("fake")
        fake_initial = options.get("fake_initial")
        executor.migrate(targets, plan, fake=fake, fake_initial=fake_initial)

    # Send the post_migrate signal, so individual apps can do whatever they need
    # to do at this point.
    emit_post_migrate_signal(self.verbosity, self.interactive, connection.alias)
def migration_progress_callback(self, action, migration=None, fake=False):
    """Write progress output for a single migration-executor action.

    ``action`` is one of "apply_start"/"apply_success",
    "unapply_start"/"unapply_success" or "render_start"/"render_success".
    Elapsed time is only measured and shown when verbosity is above 1.
    """
    if self.verbosity < 1:
        return
    compute_time = self.verbosity > 1

    def begin(message):
        # Record the start time (when timing) and print without a newline so
        # the matching *_success action can finish the same output line.
        if compute_time:
            self.start = time.time()
        self.stdout.write(message, ending="")
        self.stdout.flush()

    def elapsed_suffix():
        # " (0.123s)" when timing is enabled, otherwise empty.
        return " (%.3fs)" % (time.time() - self.start) if compute_time else ""

    if action == "apply_start":
        begin(" Applying %s..." % migration)
    elif action == "apply_success":
        status = " FAKED" if fake else " OK"
        self.stdout.write(self.style.SUCCESS(status + elapsed_suffix()))
    elif action == "unapply_start":
        begin(" Unapplying %s..." % migration)
    elif action == "unapply_success":
        status = " FAKED" if fake else " OK"
        self.stdout.write(self.style.SUCCESS(status + elapsed_suffix()))
    elif action == "render_start":
        begin(" Rendering model states...")
    elif action == "render_success":
        self.stdout.write(self.style.SUCCESS(" DONE" + elapsed_suffix()))
def sync_apps(self, connection, app_labels):
    "Runs the old syncdb-style operation on a list of app_labels."
    # Creates database tables (plus deferred SQL such as FK constraints) for
    # every migratable model in ``app_labels`` whose table does not already
    # exist, and returns the set of model classes that were created.
    cursor = connection.cursor()
    try:
        # Get a list of already installed *models* so that references work right.
        tables = connection.introspection.table_names(cursor)
        created_models = set()
        # Build the manifest of apps and models that are to be synchronized
        all_models = [
            (app_config.label,
             router.get_migratable_models(app_config, connection.alias, include_auto_created=False))
            for app_config in apps.get_app_configs()
            if app_config.models_module is not None and app_config.label in app_labels
        ]

        def model_installed(model):
            # True when neither the model's own table nor (for auto-created
            # through models) the originating model's table exists yet.
            opts = model._meta
            converter = connection.introspection.table_name_converter
            # Note that if a model is unmanaged we short-circuit and never try to install it
            return not ((converter(opts.db_table) in tables) or
                        (opts.auto_created and converter(opts.auto_created._meta.db_table) in tables))

        manifest = OrderedDict(
            (app_name, list(filter(model_installed, model_list)))
            for app_name, model_list in all_models
        )
        # Create the tables for each model
        if self.verbosity >= 1:
            self.stdout.write(" Creating tables...\n")
        with transaction.atomic(using=connection.alias, savepoint=connection.features.can_rollback_ddl):
            deferred_sql = []
            for app_name, model_list in manifest.items():
                for model in model_list:
                    if not model._meta.can_migrate(connection):
                        continue
                    if self.verbosity >= 3:
                        self.stdout.write(
                            " Processing %s.%s model\n" % (app_name, model._meta.object_name)
                        )
                    with connection.schema_editor() as editor:
                        if self.verbosity >= 1:
                            self.stdout.write(" Creating table %s\n" % model._meta.db_table)
                        editor.create_model(model)
                        # Collect deferred statements so they run only after
                        # all tables exist (e.g. cross-table constraints).
                        deferred_sql.extend(editor.deferred_sql)
                        editor.deferred_sql = []
                    created_models.add(model)
            if self.verbosity >= 1:
                self.stdout.write(" Running deferred SQL...\n")
            for statement in deferred_sql:
                cursor.execute(statement)
    finally:
        # Always release the cursor, even if DDL fails.
        cursor.close()
    return created_models
|
stevekuznetsov/ansible | refs/heads/devel | lib/ansible/modules/cloud/cloudstack/cs_role.py | 38 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2016, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: cs_role
short_description: Manages user roles on Apache CloudStack based clouds.
description:
- Create, update, delete user roles.
version_added: "2.3"
author: "René Moser (@resmo)"
options:
name:
description:
- Name of the role.
required: true
id:
description:
- ID of the role.
- If provided, C(id) is used as key.
required: false
default: null
aliases: [ 'uuid' ]
role_type:
description:
- Type of the role.
- Only considered for creation.
required: false
default: User
choices: [ 'User', 'DomainAdmin', 'ResourceAdmin', 'Admin' ]
description:
description:
- Description of the role.
required: false
default: null
state:
description:
- State of the role.
required: false
default: 'present'
choices: [ 'present', 'absent' ]
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Ensure an user role is present
- local_action:
module: cs_role
name: myrole_user
# Ensure a role having particular ID is named as myrole_user
- local_action:
module: cs_role
name: myrole_user
id: 04589590-ac63-4ffc-93f5-b698b8ac38b6
# Ensure a role is absent
- local_action:
module: cs_role
name: myrole_user
state: absent
'''
RETURN = '''
---
id:
description: UUID of the role.
returned: success
type: string
sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
name:
description: Name of the role.
returned: success
type: string
sample: myrole
description:
description: Description of the role.
returned: success
type: string
sample: "This is my role description"
role_type:
description: Type of the role.
returned: success
type: string
sample: User
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import AnsibleCloudStack, CloudStackException, cs_argument_spec, cs_required_together
class AnsibleCloudStackRole(AnsibleCloudStack):
    """Manage CloudStack user roles: create, update and delete."""

    def __init__(self, module):
        super(AnsibleCloudStackRole, self).__init__(module)
        # Map API result keys to the names used in this module's return values.
        self.returns = {
            'type': 'role_type',
        }

    def get_role(self):
        """Look up the role by uuid when given, otherwise by name.

        Returns the role dict from the API, or None when it does not exist.
        """
        uuid = self.module.params.get('uuid')
        if uuid:
            args = {
                'id': uuid,
            }
        else:
            args = {
                'name': self.module.params.get('name'),
            }
        roles = self.cs.listRoles(**args)
        # Guard with .get(): a truthy response without a 'role' key (e.g. a
        # bare count or an error payload) must not raise KeyError here.
        if roles and roles.get('role'):
            return roles['role'][0]
        return None

    def present_role(self):
        """Ensure the role exists, creating or updating as required."""
        role = self.get_role()
        if role:
            role = self._update_role(role)
        else:
            role = self._create_role(role)
        return role

    def _create_role(self, role):
        """Create the role; returns None in check mode (nothing created)."""
        self.result['changed'] = True
        args = {
            'name': self.module.params.get('name'),
            'type': self.module.params.get('role_type'),
            'description': self.module.params.get('description'),
        }
        if not self.module.check_mode:
            res = self.cs.createRole(**args)
            if 'errortext' in res:
                self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
            role = res['role']
        return role

    def _update_role(self, role):
        """Update name/description when they differ from the existing role."""
        args = {
            'id': role['id'],
            'name': self.module.params.get('name'),
            'description': self.module.params.get('description'),
        }
        if self.has_changed(args, role):
            self.result['changed'] = True
            if not self.module.check_mode:
                res = self.cs.updateRole(**args)
                if 'errortext' in res:
                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
                # The API as in 4.9 does not return an updated role yet
                if 'role' not in res:
                    role = self.get_role()
                else:
                    role = res['role']
        return role

    def absent_role(self):
        """Delete the role when present; returns the pre-deletion role or None."""
        role = self.get_role()
        if role:
            self.result['changed'] = True
            args = {
                'id': role['id'],
            }
            if not self.module.check_mode:
                res = self.cs.deleteRole(**args)
                if 'errortext' in res:
                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
        return role
def main():
    """Module entry point: build the argument spec, apply the requested
    state and exit with the collected result."""
    spec = cs_argument_spec()
    spec.update(dict(
        uuid=dict(default=None, aliases=['id']),
        name=dict(required=True),
        description=dict(default=None),
        role_type=dict(choices=['User', 'DomainAdmin', 'ResourceAdmin', 'Admin'], default='User'),
        state=dict(choices=['present', 'absent'], default='present'),
    ))
    module = AnsibleModule(
        argument_spec=spec,
        required_together=cs_required_together(),
        supports_check_mode=True
    )
    try:
        acs_role = AnsibleCloudStackRole(module)
        # Dispatch on the requested state: anything but 'absent' is 'present'.
        if module.params.get('state') == 'absent':
            role = acs_role.absent_role()
        else:
            role = acs_role.present_role()
        result = acs_role.get_result(role)
    except CloudStackException as e:
        module.fail_json(msg='CloudStackException: %s' % str(e))
    module.exit_json(**result)


if __name__ == '__main__':
    main()
|
vmanoria/bluemix-hue-filebrowser | refs/heads/master | hue-3.8.1-bluemix/desktop/core/ext-py/Django-1.6.10/django/contrib/formtools/tests/forms.py | 123 | from django import forms
class TestForm(forms.Form):
    # Fixture form for the formtools tests: two required text fields plus an
    # optional boolean and an optional date.
    field1 = forms.CharField()
    field1_ = forms.CharField()  # trailing underscore exercises unusual field names
    bool1 = forms.BooleanField(required=False)
    date1 = forms.DateField(required=False)
class HashTestForm(forms.Form):
    # Fixture form with two required free-text fields.
    name = forms.CharField()
    bio = forms.CharField()
class HashTestBlankForm(forms.Form):
    # Same fields as HashTestForm but optional, so an empty submission validates.
    name = forms.CharField(required=False)
    bio = forms.CharField(required=False)
|
TiVo/kafka | refs/heads/trunk | tests/kafkatest/services/security/__init__.py | 84 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
rtindru/django | refs/heads/master | tests/model_options/models/default_related_name.py | 140 | from django.db import models
class Author(models.Model):
    # Simple target model referenced by Editor and Book below.
    first_name = models.CharField(max_length=128)
    last_name = models.CharField(max_length=128)
class Editor(models.Model):
    name = models.CharField(max_length=128)
    # ForeignKey to Author with no explicit related_name.
    bestselling_author = models.ForeignKey(Author)
class Book(models.Model):
    title = models.CharField(max_length=128)
    authors = models.ManyToManyField(Author)
    # Per-field related_name overrides the Meta default below.
    editor = models.ForeignKey(Editor, related_name="edited_books")

    class Meta:
        # Default reverse name for this model's relation fields.
        default_related_name = "books"
class Store(models.Model):
    name = models.CharField(max_length=128)
    address = models.CharField(max_length=128)

    class Meta:
        abstract = True
        # Templated default; the placeholders are resolved per concrete subclass.
        default_related_name = "%(app_label)s_%(model_name)ss"
class BookStore(Store):
    # Inherits the templated default_related_name from abstract Store.
    available_books = models.ManyToManyField(Book)
class EditorStore(Store):
    editor = models.ForeignKey(Editor)
    available_books = models.ManyToManyField(Book)

    class Meta:
        # Overrides the templated default inherited from Store.
        default_related_name = "editor_stores"
|
freevo/freevo1 | refs/heads/master | src/plugins/screensaver/__init__.py | 1 | # -*- coding: iso-8859-1 -*-
# -----------------------------------------------------------------------
# screensaver/__init__.py - the Freevo Screensaver
# -----------------------------------------------------------------------
# $Id$
#
# Notes:
#
# Todo:
#
#
# -----------------------------------------------------------------------
# Freevo - A Home Theater PC framework
# Copyright (C) 2002 Krister Lagerstrom, et al.
# Please see the file freevo/Docs/CREDITS for a complete list of authors.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MER-
# CHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# ----------------------------------------------------------------------- */
import logging
logger = logging.getLogger("freevo.plugins.screensaver")
import time
import os
import random
import traceback
import pygame
import config
import plugin
import rc
from event import Event
import osd
import skin
import kaa
# Rebind the module name to the shared OSD singleton instance.
osd = osd.get_singleton()
#skin = skin.get_singleton()
class PluginInterface(plugin.DaemonPlugin):
    """
    An inbuilt Freevo Screensaver that requires no other program be installed to
    run.

    This plugin just proves the necessary logic to determine when to launch and
    stop a screensaver, to see some exciting (!?) graphics on the screen while it
    is active you need to activate at least one screensaver child plugin.

    For example

    | plugin.activate('screensaver')
    | plugin.activate('screensaver.balls')
    | plugin.activate('screensaver.bouncing_freevo')

    Would activate and cycle between the balls screensaver and the bouncing
    freevo logo screensaver.

    Use 'freevo plugins -l' to see a list of available ScreenSaverPlugins.
    """

    def __init__(self):
        logger.log( 9, 'PluginInterface.__init__()')
        plugin.DaemonPlugin.__init__(self)
        # Listen to all events so user activity can reset the idle countdown.
        self.event_listener = True
        self.last_event = time.time()
        self.screensaver_showing = False
        self.menuw = None
        # Idle seconds before the saver starts.
        self.start_delay = config.SCREENSAVER_DELAY
        # Seconds each child saver runs before switching to the next one.
        self.cycle_time = config.SCREENSAVER_CYCLE_TIME
        # Child plugins are looked up lazily on first start (see start_saver).
        self.plugins = None
        self.start_timer = kaa.OneShotTimer(self.start_saver)
        self.dpms_timer = kaa.OneShotTimer(self.enable_dpms)
        self.dpms_enabled = False
        self.timer = None
        logger.debug('Screensaver install (delay = %d)', self.start_delay)

    def config(self):
        # Configuration variables this plugin understands, with their defaults.
        logger.log( 9, 'config()')
        return [
            ('SCREENSAVER_DELAY', 300, '# of seconds to wait to start saver.'),
            ('SCREENSAVER_CYCLE_TIME', 60, '# of seconds to run a screensaver before starting another saver.'),
            ('SCREENSAVER_SCREEN_OFF_DELAY', 3600, '# of seconds screensaver has been active before using DPMS to turn the display off, set to 0 to disable' )
        ]

    def eventhandler(self, event=None, menuw=None, arg=None):
        """
        eventhandler to handle the events. Always return False since we
        are just a listener and really can't send back True.
        """
        logger.log( 9, 'eventhandler(event=%r, menuw=%r, arg=%r)', event.name, menuw, arg)
        if menuw:
            self.menuw = menuw
        # Any event except media identification counts as user activity:
        # restart the idle countdown and hide the saver if it is showing.
        if plugin.isevent(event) != 'IDENTIFY_MEDIA':
            self.start_timer.start(self.start_delay)
            if self.screensaver_showing:
                self.stop_saver()
                return True
        return False

    def shutdown(self):
        logger.log( 9, 'shutdown()')
        self.stop_saver()

    def start_saver(self):
        # Fired by start_timer after start_delay idle seconds.
        logger.log( 9, 'start_saver()')
        if self.screensaver_showing or not skin.active():
            return
        self.screensaver_showing = True
        if self.plugins is None:
            # Lazy lookup of all registered 'screensaver' child plugins.
            self.plugins = plugin.get('screensaver')
            logger.debug('plugins=%r', self.plugins)
        osd.screensaver_running = True
        skin.clear()
        self.current_saver = None
        self.index = 0
        plugins_count = len(self.plugins)
        logger.debug('found %s screensaver(s)', plugins_count)
        if config.SCREENSAVER_SCREEN_OFF_DELAY:
            # Arm the timer that turns the display off via DPMS.
            logger.debug('Enabling DPMS timer')
            self.dpms_timer.start(config.SCREENSAVER_SCREEN_OFF_DELAY)
        self.__next()

    def stop_saver(self):
        logger.log( 9, 'stop_saver()')
        # self.timer is only set once a saver has actually been scheduled.
        if self.timer is not None:
            self.disable_dpms()
            self.dpms_timer.stop()
            self.timer.stop()
            self.screensaver_showing = False
            skin.redraw()
            osd.screensaver_running = False
            osd.update()
            logger.debug('Screensaver thread stopped')

    def enable_dpms(self):
        # Blank the screen and force the display off through X DPMS.
        self.dpms_enabled = True
        self.timer.stop()
        osd.clearscreen(osd.COL_BLACK)
        osd.update()
        logger.debug('Forced DPMS OFF')
        os.system('xset dpms force off')

    def disable_dpms(self):
        # Force the display back on through X DPMS.
        self.dpms_enabled = False
        logger.debug('Forced DPMS ON')
        os.system('xset dpms force on')

    def __next(self):
        # Choose the next saver: the only one, round-robin for up to four,
        # or randomly when more than four are installed.
        plugins_count = len(self.plugins)
        # No current screensaver so select one of the installed screensaver
        # plugins at random
        # if current_saver is None:
        if plugins_count == 1:
            self.current_saver = self.plugins[0]
        elif plugins_count > 1 and plugins_count <= 4:
            self.current_saver = self.plugins[self.index]
            self.index += 1
            if self.index >= plugins_count:
                self.index = 0
        elif plugins_count > 4:
            self.index = random.randint(0, len(self.plugins) - 1)
            self.current_saver = self.plugins[self.index]
        # No screensaver found just sleep for 200ms
        if self.current_saver is None:
            self.timer = kaa.OneShotTimer(self.__next)
            self.timer.start(0.2)
        else:
            self.__run_screensaver__(self.current_saver)

    def __run_screensaver__(self, screensaver):
        # Initialise one child saver and kick off its timed frame loop.
        logger.log( 9, '__run_screensaver__(screensaver=%r)', screensaver.plugin_name)
        try:
            fps = screensaver.start(osd.width, osd.height)
            time_per_frame = 1.0 / fps
            max_iterations = int(self.cycle_time / time_per_frame)
            iteration = 0
            self.__draw(screensaver, time_per_frame, 0, max_iterations )
        except:
            print 'Screensaver %s crashed!' % screensaver.plugin_name
            traceback.print_exc()
            # Remove the broken screensaver so we don't try to run it again
            self.plugins.remove(screensaver)

    def __draw(self, screensaver, time_per_frame, iteration, max_iterations):
        # Draw one frame, then re-arm the timer for the rest of the frame
        # budget; after max_iterations frames, switch to the next saver.
        s = time.time()
        try:
            screensaver.draw(osd.screen)
            pygame.display.flip()
        except:
            # Force the "cycle finished" path below so this saver is stopped.
            iteration = max_iterations
            print 'Screensaver %s crashed!' % screensaver.plugin_name
            traceback.print_exc()
            # Remove the broken screensaver so we don't try to run it again
            self.plugins.remove(screensaver)
        e = time.time()
        t = e - s
        iteration += 1
        if iteration < max_iterations:
            # Sleep only the remainder of the frame budget; when the frame
            # overran, wait a full frame instead of firing immediately.
            d = time_per_frame - t
            if d < 0.0:
                d = time_per_frame
            self.timer = kaa.OneShotTimer(self.__draw, screensaver, time_per_frame, iteration, max_iterations)
            self.timer.start(d)
        else:
            try:
                screensaver.stop()
            except:
                print 'Screensaver %s crashed when stopping' % screensaver.plugin_name
            osd.clearscreen(osd.COL_BLACK)
            osd.update()
            self.__next()
class ScreenSaverPlugin(plugin.Plugin):
    """Base class for screensaver child plugins.

    Subclasses implement start()/draw()/stop() and register under the
    'screensaver' plugin type so PluginInterface can find and cycle them.
    """

    def __init__(self):
        logger.log( 9, 'ScreenSaverPlugin.__init__()')
        plugin.Plugin.__init__(self)
        # Register under the 'screensaver' type so plugin.get('screensaver')
        # returns this saver.
        self._type = 'screensaver'

    def start(self, width, height):
        # Fix: the docstring was previously placed after the logger call, so
        # it was a no-op string expression and never became __doc__.
        """
        Initialise the screensaver before each run.

        Returns the number of frames per second the saver
        wants to run at.
        """
        logger.log( 9, 'start(width=%r, height=%r)', width, height)
        return 25

    def stop(self):
        """
        Deinitialise the screensaver after each run.
        """
        logger.log( 9, 'stop()')
        pass

    def draw(self, surface):
        """
        Draw a frame onto the supplied surface called
        every 1/fps seconds (where fps was returned by start())
        """
        logger.log( 9, 'draw(surface=%r)', surface)
        pass
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.