repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
agry/NGECore2 | scripts/mobiles/dathomir/mutant_ baz_nitch.py | 2 | 1585 | import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
    """Build and register the 'mutant_baz_nitch' creature template with the spawn service."""
    template = MobileTemplate()

    # Identity and difficulty.
    template.setCreatureName('mutant_baz_nitch')
    template.setLevel(63)
    template.setDifficulty(Difficulty.NORMAL)
    template.setMinSpawnDistance(4)
    template.setMaxSpawnDistance(8)
    template.setDeathblow(True)
    template.setScale(1)

    # Harvestable resources.
    template.setMeatType("Wild Meat")
    template.setMeatAmount(5)
    template.setHideType("Leathery Hide")
    template.setHideAmount(2)
    template.setBoneType("Animal Bones")
    template.setBoneAmount(2)

    # Social behaviour.
    template.setSocialGroup("baz nitch")
    template.setAssistRange(12)
    template.setStalker(False)

    # Visual model(s).
    models = Vector()
    models.add('object/mobile/shared_baz_nitch_hue.iff')
    template.setTemplates(models)

    # Natural (unarmed) weapon.
    weapons = Vector()
    weapons.add(WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic'))
    template.setWeaponTemplateVector(weapons)

    # Special attacks.
    attacks = Vector()
    attacks.add('bm_bite_4')
    attacks.add('bm_disease_4')
    attacks.add('bm_enfeeble_4')
    template.setDefaultAttack('creatureMeleeAttack')
    template.setAttacks(attacks)

    core.spawnService.addMobileTemplate('mutant_baz_nitch', template)
CYBERBUGJR/Diamond | src/collectors/snmpraw/snmpraw.py | 56 | 6074 | # coding=utf-8
"""
The SNMPRawCollector is designed for collecting data from SNMP-enabled devices,
using a set of specified OIDs.
#### Configuration
Below is an example configuration for the SNMPRawCollector. The collector
can collect data from any number of devices by adding configuration sections
under the *devices* header. By default the collector will collect every 60
seconds. This might be a bit excessive and put unnecessary load on the
devices being polled. You may wish to change this to every 300 seconds; however,
you will then need to modify your Graphite data retentions to handle this properly.
```
# Options for SNMPRawCollector
enabled = True
interval = 60
[devices]
# Start the device configuration
# Note: this name will be used in the metric path.
[[my-identification-for-this-host]]
host = localhost
port = 161
community = public
# Start the OID list for this device
# Note: the value part will be used in the metric path.
[[[oids]]]
1.3.6.1.4.1.2021.10.1.3.1 = cpu.load.1min
1.3.6.1.4.1.2021.10.1.3.2 = cpu.load.5min
1.3.6.1.4.1.2021.10.1.3.3 = cpu.load.15min
# If you want another host, you can. But you probably won't need it.
[[another-identification]]
host = router1.example.com
port = 161
community = public
[[[oids]]]
oid = metric.path
oid = metric.path
```
Note: If you modify the SNMPRawCollector configuration, you will need to
restart diamond.
#### Dependencies
* pysnmp (which depends on pyasn1 0.1.7 and pycrypto)
"""
import os
import sys
import time
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)),
'snmp'))
from snmp import SNMPCollector as parent_SNMPCollector
from diamond.metric import Metric
class SNMPRawCollector(parent_SNMPCollector):
    """Collect arbitrary SNMP OID values from configured devices.

    Each device section in the config carries an ``oids`` mapping of
    OID -> metric name; every OID is polled with GET (falling back to a
    WALK for instance-less table OIDs) and published as a GAUGE metric.
    """

    def process_config(self):
        """Process configuration and initialise the per-process mute list."""
        super(SNMPRawCollector, self).process_config()
        # list to save non-existing oid's per device, to avoid repetition of
        # errors in logging. Signal HUP to diamond/collector to flush this
        self.skip_list = []

    def get_default_config(self):
        """
        Override SNMPCollector.get_default_config method to provide
        default_config for the SNMPInterfaceCollector
        """
        default_config = super(SNMPRawCollector,
                               self).get_default_config()
        default_config.update({
            'oids': {},
            'path_prefix': 'servers',
            'path_suffix': 'snmp',
        })
        return default_config

    def _precision(self, value):
        """
        Return the precision of the number

        Counts the digits after the last '.' of the stringified value;
        returns 0 for values without a decimal point.
        """
        value = str(value)
        decimal = value.rfind('.')
        if decimal == -1:
            return 0
        return len(value) - decimal - 1

    def _skip(self, device, oid, reason=None):
        # Mute this (device, oid) pair for the remainder of the process;
        # a HUP signal (see process_config) clears the list.
        self.skip_list.append((device, oid))
        if reason is not None:
            self.log.warn('Muted \'{0}\' on \'{1}\', because: {2}'.format(
                oid, device, reason))

    def _get_value_walk(self, device, oid, host, port, community):
        """Fetch a single value via SNMP WALK.

        Returns None (and mutes the OID) when the device is down or the
        walk does not return exactly one entry.
        """
        data = self.walk(oid, host, port, community)

        if data is None:
            self._skip(device, oid, 'device down (#2)')
            return

        self.log.debug('Data received from WALK \'{0}\': [{1}]'.format(
            device, data))

        if len(data) != 1:
            self._skip(
                device,
                oid,
                'unexpected response, data has {0} entries'.format(
                    len(data)))
            return

        # because we only allow 1-key dicts, we can pick with absolute index
        # NOTE(review): dict.items()[0] is Python 2 only; under Python 3 this
        # would need next(iter(data.values())) - confirm target runtime.
        value = data.items()[0][1]
        return value

    def _get_value(self, device, oid, host, port, community):
        """Fetch a single value via SNMP GET, falling back to WALK.

        Returns None (and mutes the OID) on any unrecoverable condition.
        """
        data = self.get(oid, host, port, community)

        if data is None:
            self._skip(device, oid, 'device down (#1)')
            return

        self.log.debug('Data received from GET \'{0}\': [{1}]'.format(
            device, data))

        if len(data) == 0:
            self._skip(device, oid, 'empty response, device down?')
            return

        if oid not in data:
            # oid is not even in hierarchy, happens when using 9.9.9.9
            # but not when using 1.9.9.9
            self._skip(device, oid, 'no object at OID (#1)')
            return

        value = data[oid]

        if value == 'No Such Object currently exists at this OID':
            self._skip(device, oid, 'no object at OID (#2)')
            return

        if value == 'No Such Instance currently exists at this OID':
            # OID exists but has no scalar instance; try walking the subtree.
            return self._get_value_walk(device, oid, host, port, community)

        return value

    def collect_snmp(self, device, host, port, community):
        """
        Collect SNMP interface data from device
        """
        self.log.debug(
            'Collecting raw SNMP statistics from device \'{0}\''.format(device))

        dev_config = self.config['devices'][device]
        if 'oids' in dev_config:
            for oid, metricName in dev_config['oids'].items():

                # Skip OIDs that were previously muted for this device.
                if (device, oid) in self.skip_list:
                    self.log.debug(
                        'Skipping OID \'{0}\' ({1}) on device \'{2}\''.format(
                            oid, metricName, device))
                    continue

                timestamp = time.time()

                value = self._get_value(device, oid, host, port, community)
                if value is None:
                    continue

                self.log.debug(
                    '\'{0}\' ({1}) on device \'{2}\' - value=[{3}]'.format(
                        oid, metricName, device, value))

                # Metric path: <path_prefix>.<device>.<path_suffix>.<metricName>
                path = '.'.join([self.config['path_prefix'], device,
                                 self.config['path_suffix'], metricName])
                metric = Metric(path=path, value=value, timestamp=timestamp,
                                precision=self._precision(value),
                                metric_type='GAUGE')
                self.publish_metric(metric)
| mit |
lfrdm/medpy | tests/filter_/LabelImageStatistics.py | 2 | 8480 | """Unittest for the label image statistics class."""
# build-in modules
import unittest
# third-party modules
import scipy
from scipy import stats
# path changes
# own modules
from medpy.filter import LabelImageStatistics
# information
__author__ = "Oskar Maier"
__version__ = "r0.1.1, 2011-12-29"
__email__ = "oskar.maier@googlemail.com"
__status__ = "Release"
__description__ = "Label image statistics class unittest."
# code
class TestLabelImageStatistics(unittest.TestCase):
    """Unit tests for medpy.filter.LabelImageStatistics.

    NOTE(review): this suite uses Python-2-era idioms - scipy.* aliases for
    numpy functions/dtypes and integer '/' used as an array index
    (e.g. histogram_width()/2). Under Python 3 / modern scipy these would
    need '//' and numpy equivalents - confirm target environment.
    """

    def test_Basic(self):
        """Test the case of a region with only one intensity value and some basic return values."""
        # Create label image with only one region
        label_image = scipy.zeros(2*2*2, dtype=scipy.int8).reshape(2,2,2)
        # Create original image with only one intensity value
        original_image = scipy.zeros(2*2*2, dtype=scipy.int8).reshape(2,2,2)
        # Initialize object
        statistics = LabelImageStatistics(label_image, original_image)
        # Check created intensity distribution: one region with zero spread
        intensity_distributions = statistics.get_intensity_distributions()
        self.assertEqual(len(intensity_distributions), 1)
        self.assertEqual(intensity_distributions[0], 0)
        intensity_distribution_histogram = statistics.get_intensity_distribution_histogram()
        self.assertEqual(len(intensity_distribution_histogram[0]), statistics.get_intensity_distribution_histogram_width())
        self.assertEqual(len(intensity_distribution_histogram[1]), statistics.get_intensity_distribution_histogram_width() + 1)
        # The single distribution value should land in the central bin.
        self.assertEqual(intensity_distribution_histogram[0][statistics.get_intensity_distribution_histogram_width()/2], 1)
        self.assertEqual(intensity_distribution_histogram[0].max(), 1)
        self.assertEqual(intensity_distribution_histogram[0].min(), 0)
        # Check created size distribution: one region covering all 8 voxels
        sizes = statistics.get_sizes()
        self.assertEqual(len(sizes), 1)
        self.assertEqual(sizes[0], 2*2*2)
        sizes_histogram = statistics.get_size_histogram()
        self.assertEqual(len(sizes_histogram[0]), statistics.get_size_histogram_width())
        self.assertEqual(len(sizes_histogram[1]), statistics.get_size_histogram_width() + 1)
        self.assertEqual(sizes_histogram[0][statistics.get_size_histogram_width()/2], 1)
        self.assertEqual(sizes_histogram[0].max(), 1)
        self.assertEqual(sizes_histogram[0].min(), 0)
        # Check other statistics values
        self.assertTrue(statistics.labels_are_consecutive())
        self.assertEqual(statistics.get_min_intensity(), 0)
        self.assertEqual(statistics.get_max_intensity(), 0)
        self.assertEqual(statistics.get_min_label(), 0)
        self.assertEqual(statistics.get_max_label(), 0)
        self.assertEqual(statistics.get_label_count(), 1)

    def test_Homogeneous(self):
        """Checks the return value for a homogeneous region."""
        # Create label image with only one region
        label_image = scipy.zeros(2*2*2, dtype=scipy.int8).reshape(2,2,2)
        # Create original image with only one intensity value
        original_image = scipy.zeros(2*2*2, dtype=scipy.int8).reshape(2,2,2)
        # Initialize object
        statistics = LabelImageStatistics(label_image, original_image)
        # Check created intensity distribution: homogeneous region -> spread 0
        intensity_distributions = statistics.get_intensity_distributions()
        self.assertEqual(len(intensity_distributions), 1)
        self.assertEqual(intensity_distributions[0], 0)
        intensity_distribution_histogram = statistics.get_intensity_distribution_histogram()
        self.assertEqual(len(intensity_distribution_histogram[0]), statistics.get_intensity_distribution_histogram_width())
        self.assertEqual(len(intensity_distribution_histogram[1]), statistics.get_intensity_distribution_histogram_width() + 1)
        self.assertEqual(intensity_distribution_histogram[0][statistics.get_intensity_distribution_histogram_width()/2], 1)
        self.assertEqual(intensity_distribution_histogram[0].max(), 1)
        self.assertEqual(intensity_distribution_histogram[0].min(), 0)

    def test_Equality(self):
        """Checks whether equally formed histograms in different intensity regions produce the same result."""
        # Create random value for testing
        x = scipy.random.randint(10, 10000)
        # Create label image with only one region
        label_image = scipy.zeros(2*2*2, dtype=scipy.int8).reshape(2,2,2)
        # Create original images with two equally distributed intensity values
        # NOTE(review): on a 2x2x2 array the [2:,:,:] slice is empty, so each
        # image ends up with a single constant value - the equality below then
        # holds trivially (both spreads are 0). Confirm intended shapes.
        original_image1 = scipy.zeros(2*2*2).reshape(2,2,2)
        original_image2 = scipy.zeros(2*2*2).reshape(2,2,2)
        original_image1[2:,:,:] = -x
        original_image1[:2,:,:] = 0
        original_image2[2:,:,:] = 0
        original_image2[:2,:,:] = x
        # Initialize objects
        statistics1 = LabelImageStatistics(label_image, original_image1)
        statistics2 = LabelImageStatistics(label_image, original_image2)
        # Check created intensity distributions to be equal
        # (the measure is invariant under intensity translation)
        intensity_distributions1 = statistics1.get_intensity_distributions()
        intensity_distributions2 = statistics2.get_intensity_distributions()
        self.assertEqual(intensity_distributions1[0], intensity_distributions2[0])

    def test_Continuity(self):
        """Checks if the returned values are continuous for more spaced intensity values."""
        # Create random value for testing
        x = scipy.random.randint(10, 10000)
        # Create label image with only one region
        label_image = scipy.zeros(2*2*2, dtype=scipy.int8).reshape(2,2,2)
        # Create original images with two equally distributed intensity values
        original_image1 = scipy.zeros(2*2*2).reshape(2,2,2)
        original_image2 = scipy.zeros(2*2*2).reshape(2,2,2)
        original_image1[1:,:,:] = x
        original_image2[1:,:,:] = 2*x
        # Initialize objects
        statistics1 = LabelImageStatistics(label_image, original_image1)
        statistics2 = LabelImageStatistics(label_image, original_image2)
        # Wider intensity spacing must yield a strictly larger distribution value
        intensity_distributions1 = statistics1.get_intensity_distributions()
        intensity_distributions2 = statistics2.get_intensity_distributions()
        self.assertGreater(intensity_distributions2[0], intensity_distributions1[0])

    def test_Uniform(self):
        """Checks the return value for an uniform intensity histogram."""
        # might not be possible for 3D image, as 3^X never divideable through 10
        pass

    def test_Intensity_1(self):
        """Test a case of distributed intensity values."""
        # Create label image with only one region
        label_image = scipy.zeros(2*2*2, dtype=scipy.int8).reshape(2,2,2)
        # Create original image with two equally distributed intensity values
        original_image = scipy.zeros(2*2*2, dtype=scipy.int8)
        original_image[:4] = -1
        original_image[4:] = 1
        original_image = original_image.reshape(2,2,2)
        # Initialize object
        statistics = LabelImageStatistics(label_image, original_image)
        # Compute expected result: distance between the value histogram and a
        # fitted normal distribution, scaled by the histogram range.
        i = scipy.array([-1,-1,-1,-1,1,1,1,1])
        h = scipy.histogram(i, statistics._intensity_distribution_local_histogram_width)
        hr = scipy.array(h[0]) / float(h[0].sum())
        g = stats.norm(*stats.norm.fit(i))
        r = abs(hr - g.pdf(h[1][:-1]))
        r *= h[1][-2] - h[1][0]
        r = r.sum()
        # Check created intensity distribution
        intensity_distributions = statistics.get_intensity_distributions()
        self.assertEqual(len(intensity_distributions), 1)
        self.assertEqual(intensity_distributions[0], i.std())
        intensity_distribution_histogram = statistics.get_intensity_distribution_histogram()
        self.assertEqual(intensity_distribution_histogram[0][statistics.get_intensity_distribution_histogram_width()/2], 1)
        self.assertEqual(intensity_distribution_histogram[0].max(), 1)
        self.assertEqual(intensity_distribution_histogram[0].min(), 0)
        self.assertEqual(intensity_distribution_histogram[1].mean(), i.std())
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
Jusedawg/SickRage | lib/guessit/test/test_yml.py | 18 | 10503 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=no-self-use, pointless-statement, missing-docstring, invalid-name
import logging
# io.open supports encoding= in python 2.7
from io import open # pylint: disable=redefined-builtin
import os
import yaml
import six
import babelfish
import pytest
from rebulk.remodule import re
from rebulk.utils import is_iterable
from guessit.options import parse_options
from ..yamlutils import OrderedDictYAMLLoader
from .. import guessit
logger = logging.getLogger(__name__)
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
filename_predicate = None
string_predicate = None
# filename_predicate = lambda filename: 'episode_title' in filename
# string_predicate = lambda string: '-DVD.BlablaBla.Fix.Blablabla.XVID' in string
class EntryResult(object):
    """Outcome of checking one input string against its expected guesses.

    Observed matches are sorted into five buckets (``valid``, ``missing``,
    ``different``, ``extra``, ``others``); the ``ok`` / ``warning`` /
    ``error`` properties summarise them.  When ``negates`` is True the
    string is expected NOT to produce the given results, which inverts
    the interpretation of the buckets.
    """

    def __init__(self, string, negates=False):
        self.string = string
        self.negates = negates
        self.valid = []      # (key, value) pairs found as expected
        self.missing = []    # expected but absent
        self.different = []  # present but with a different value
        self.extra = []      # present but not expected
        self.others = []     # any other failure (e.g. match not global)

    @property
    def ok(self):
        """True when the entry fully meets (or, negated, fully misses) expectations."""
        if self.negates:
            return self.missing or self.different
        return not self.missing and not self.different and not self.extra and not self.others

    @property
    def warning(self):
        """True when only unexpected extra properties were found."""
        if self.negates:
            return False
        return not self.missing and not self.different and self.extra

    @property
    def error(self):
        """True when expectations were violated (or, negated, unexpectedly met)."""
        if self.negates:
            return not self.missing and not self.different and not self.others
        return self.missing or self.different or self.others

    def __repr__(self):
        if self.ok:
            return self.string + ': OK!'
        elif self.warning:
            return '%s%s: WARNING! (valid=%i, extra=%i)' % ('-' if self.negates else '', self.string, len(self.valid),
                                                            len(self.extra))
        elif self.error:
            return '%s%s: ERROR! (valid=%i, missing=%i, different=%i, extra=%i, others=%i)' % \
                   ('-' if self.negates else '', self.string, len(self.valid), len(self.missing), len(self.different),
                    len(self.extra), len(self.others))
        else:
            # Bug fix: message previously read 'UNKOWN!'.
            return '%s%s: UNKNOWN! (valid=%i, missing=%i, different=%i, extra=%i, others=%i)' % \
                   ('-' if self.negates else '', self.string, len(self.valid), len(self.missing), len(self.different),
                    len(self.extra), len(self.others))

    @property
    def details(self):
        """Multi-line breakdown: one header per non-empty bucket, one indented line per item."""
        ret = []
        if self.valid:
            ret.append('valid=' + str(len(self.valid)))
            for valid in self.valid:
                ret.append(' ' * 4 + str(valid))
        if self.missing:
            ret.append('missing=' + str(len(self.missing)))
            for missing in self.missing:
                ret.append(' ' * 4 + str(missing))
        if self.different:
            ret.append('different=' + str(len(self.different)))
            for different in self.different:
                ret.append(' ' * 4 + str(different))
        if self.extra:
            ret.append('extra=' + str(len(self.extra)))
            for extra in self.extra:
                ret.append(' ' * 4 + str(extra))
        if self.others:
            ret.append('others=' + str(len(self.others)))
            for other in self.others:
                ret.append(' ' * 4 + str(other))
        return ret
class Results(list):
    """A list of EntryResult objects with a bulk assertion helper."""

    def assert_ok(self):
        """Fail (via assert) if any collected entry reports an error."""
        failed = []
        for item in self:
            if item.error:
                failed.append(item)
        assert not failed
def files_and_ids(predicate=None):
    """Collect relative paths and pytest ids of .yml files under __location__.

    When *predicate* is given, only paths for which it returns a truthy
    value are kept.  Returns a (files, ids) pair of parallel lists.
    """
    yml_files = []
    yml_ids = []
    for dirpath, _, names in os.walk(__location__):
        if dirpath == __location__:
            rel_dir = ''
        else:
            rel_dir = os.path.relpath(dirpath, __location__)
        for entry in names:
            stem, ext = os.path.splitext(entry)
            if ext != '.yml':
                continue
            rel_path = os.path.join(rel_dir, entry)
            if predicate and not predicate(rel_path):
                continue
            yml_files.append(rel_path)
            yml_ids.append(os.path.join(rel_dir, stem))
    return yml_files, yml_ids
class TestYml(object):
    """
    Run tests from yaml files.
    Multiple input strings having same expected results can be chained.
    Use $ marker to check inputs that should not match results.
    """

    # Leading option markers on an input string: '-' negates, '+' requires
    # the match to cover the whole string (see parse_token_options).
    options_re = re.compile(r'^([ \+-]+)(.*)')

    files, ids = files_and_ids(filename_predicate)

    @staticmethod
    def set_default(expected, default):
        # Fill missing expectation keys from the file's __default__ section.
        if default:
            for k, v in default.items():
                if k not in expected:
                    expected[k] = v

    @pytest.mark.parametrize('filename', files, ids=ids)
    def test(self, filename, caplog):
        """Run every string of one yaml file and assert all entries pass."""
        caplog.setLevel(logging.INFO)
        with open(os.path.join(__location__, filename), 'r', encoding='utf-8') as infile:
            data = yaml.load(infile, OrderedDictYAMLLoader)
        entries = Results()

        # A None expectation means "same as the next non-None entry below";
        # walking the items in reverse propagates the values upward.
        last_expected = None
        for string, expected in reversed(list(data.items())):
            if expected is None:
                data[string] = last_expected
            else:
                last_expected = expected

        default = None
        try:
            default = data['__default__']
            del data['__default__']
        except KeyError:
            pass

        for string, expected in data.items():
            TestYml.set_default(expected, default)
            entry = self.check_data(filename, string, expected)
            entries.append(entry)
        entries.assert_ok()

    def check_data(self, filename, string, expected):
        """Check one string, log the outcome, and return its EntryResult."""
        # Python 2: compare byte strings, so encode both input and values.
        if six.PY2 and isinstance(string, six.text_type):
            string = string.encode('utf-8')
            converts = []
            for k, v in expected.items():
                if isinstance(v, six.text_type):
                    v = v.encode('utf-8')
                converts.append((k, v))
            for k, v in converts:
                expected[k] = v
        if not isinstance(string, str):
            string = str(string)
        if not string_predicate or string_predicate(string):  # pylint: disable=not-callable
            entry = self.check(string, expected)
            if entry.ok:
                logger.debug('[' + filename + '] ' + str(entry))
            elif entry.warning:
                logger.warning('[' + filename + '] ' + str(entry))
            elif entry.error:
                logger.error('[' + filename + '] ' + str(entry))
                for line in entry.details:
                    logger.error('[' + filename + '] ' + ' ' * 4 + line)
            return entry

    def check(self, string, expected):
        """Guess one string and compare the result against *expected*."""
        negates, global_, string = self.parse_token_options(string)

        options = expected.get('options')
        if options is None:
            options = {}
        if not isinstance(options, dict):
            options = parse_options(options)
        if 'implicit' not in options:
            options['implicit'] = True
        try:
            result = guessit(string, options)
        except Exception as exc:
            logger.error('[' + string + '] Exception: ' + str(exc))
            raise exc

        entry = EntryResult(string, negates)

        if global_:
            self.check_global(string, result, entry)

        self.check_expected(result, expected, entry)

        return entry

    def parse_token_options(self, string):
        """Strip leading '-'/'+' markers; return (negates, global_, string)."""
        matches = self.options_re.search(string)
        negates = False
        global_ = False
        if matches:
            string = matches.group(2)
            for opt in matches.group(1):
                if '-' in opt:
                    negates = True
                if '+' in opt:
                    global_ = True
        return negates, global_, string

    def check_global(self, string, result, entry):
        """Record an 'others' failure unless matches span the whole string."""
        global_span = []
        for result_matches in result.matches.values():
            for result_match in result_matches:
                if not global_span:
                    global_span = list(result_match.span)
                else:
                    if global_span[0] > result_match.span[0]:
                        global_span[0] = result_match.span[0]
                    if global_span[1] < result_match.span[1]:
                        global_span[1] = result_match.span[1]
        if global_span and global_span[1] - global_span[0] < len(string):
            entry.others.append("Match is not global")

    def is_same(self, value, expected):
        """Compare a guessed value (scalar or iterable) against the expectation."""
        values = set(value) if is_iterable(value) else set((value,))
        expecteds = set(expected) if is_iterable(expected) else set((expected,))
        if len(values) != len(expecteds):
            return False
        # Languages / countries are compared after parsing the expected
        # strings through babelfish, so 'en' == Language('eng').
        if isinstance(next(iter(values)), babelfish.Language):
            # pylint: disable=no-member
            expecteds = set([babelfish.Language.fromguessit(expected) for expected in expecteds])
        elif isinstance(next(iter(values)), babelfish.Country):
            # pylint: disable=no-member
            expecteds = set([babelfish.Country.fromguessit(expected) for expected in expecteds])
        return values == expecteds

    def check_expected(self, result, expected, entry):
        """Fill the entry's valid/missing/different/extra buckets."""
        if expected:
            for expected_key, expected_value in expected.items():
                if expected_key and expected_key != 'options' and expected_value is not None:
                    # A '-' prefix on the key negates that single expectation.
                    negates_key, _, result_key = self.parse_token_options(expected_key)
                    if result_key in result.keys():
                        if not self.is_same(result[result_key], expected_value):
                            if negates_key:
                                entry.valid.append((expected_key, expected_value))
                            else:
                                entry.different.append((expected_key, expected_value, result[expected_key]))
                        else:
                            if negates_key:
                                entry.different.append((expected_key, expected_value, result[expected_key]))
                            else:
                                entry.valid.append((expected_key, expected_value))
                    elif not negates_key:
                        entry.missing.append((expected_key, expected_value))

        for result_key, result_value in result.items():
            if result_key not in expected.keys():
                entry.extra.append((result_key, result_value))
| gpl-3.0 |
Microvellum/Fluid-Designer | win64-vc/2.78/scripts/startup/bl_ui/properties_physics_fluid.py | 2 | 10739 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import bpy
from bpy.types import Panel, Menu
from bpy.app.translations import pgettext_iface as iface_
class FLUID_MT_presets(Menu):
    """Menu listing the fluid viscosity presets stored under presets/fluid."""
    bl_label = "Fluid Presets"
    preset_subdir = "fluid"
    preset_operator = "script.execute_preset"
    # Reuse the generic preset-menu drawing provided by Menu.
    draw = Menu.draw_preset
class PhysicButtonsPanel:
    """Mixin for fluid physics panels: placement in Properties > Physics
    plus a shared poll that requires a mesh object with a fluid modifier."""
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = "physics"

    @classmethod
    def poll(cls, context):
        ob = context.object
        rd = context.scene.render
        # Show only for mesh objects with fluid data, under a compatible engine.
        return (ob and ob.type == 'MESH') and rd.engine in cls.COMPAT_ENGINES and (context.fluid)
class PHYSICS_PT_fluid(PhysicButtonsPanel, Panel):
    """Main fluid panel: type selector plus per-type settings."""
    bl_label = "Fluid"
    COMPAT_ENGINES = {'BLENDER_RENDER'}

    def draw(self, context):
        layout = self.layout

        md = context.fluid
        fluid = md.settings

        col = layout.column()
        # Blender may be compiled without the fluid modifier.
        if not bpy.app.build_options.mod_fluid:
            col.label("Built without fluids")
            return

        col.prop(fluid, "type")
        # The listed types are always active; the rest have an 'use' toggle.
        if fluid.type not in {'NONE', 'DOMAIN', 'PARTICLE', 'FLUID', 'OBSTACLE'}:
            col.prop(fluid, "use")

        layout = layout.column()
        if fluid.type not in {'NONE', 'DOMAIN', 'PARTICLE', 'FLUID', 'OBSTACLE'}:
            # Grey out the rest of the panel when the object is disabled.
            layout.active = fluid.use

        if fluid.type == 'DOMAIN':
            # Bake button with the estimated memory requirement in its label.
            # odd formatting here so translation script can extract string
            layout.operator("fluid.bake", text=iface_("Bake (Req. Memory: %s)") % fluid.memory_estimate,
                            translate=False, icon='MOD_FLUIDSIM')
            if bpy.app.build_options.openmp:
                layout.prop(fluid, "threads", text="Simulation Threads")

            split = layout.split()

            col = split.column()
            col.label(text="Resolution:")
            col.prop(fluid, "resolution", text="Final")
            col.label(text="Render Display:")
            col.prop(fluid, "render_display_mode", text="")

            col = split.column()
            col.label()
            col.prop(fluid, "preview_resolution", text="Preview")
            col.label(text="Viewport Display:")
            col.prop(fluid, "viewport_display_mode", text="")

            split = layout.split()

            col = split.column()
            col.label(text="Time:")
            sub = col.column(align=True)
            sub.prop(fluid, "start_time", text="Start")
            sub.prop(fluid, "end_time", text="End")
            col.prop(fluid, "simulation_rate", text="Speed")

            col = split.column()
            col.label()
            sub = col.column(align=True)
            sub.prop(fluid, "use_speed_vectors")
            sub.prop(fluid, "use_reverse_frames")
            col.prop(fluid, "frame_offset", text="Offset")

            # Cache directory for the baked simulation data.
            layout.prop(fluid, "filepath", text="")

        elif fluid.type == 'FLUID':
            split = layout.split()

            col = split.column()
            col.label(text="Volume Initialization:")
            col.prop(fluid, "volume_initialization", text="")
            col.prop(fluid, "use_animated_mesh")

            col = split.column()
            col.label(text="Initial Velocity:")
            col.prop(fluid, "initial_velocity", text="")

        elif fluid.type == 'OBSTACLE':
            split = layout.split()

            col = split.column()
            col.label(text="Volume Initialization:")
            col.prop(fluid, "volume_initialization", text="")
            col.prop(fluid, "use_animated_mesh")

            col = split.column()
            subsplit = col.split()
            subcol = subsplit.column()
            # Slip settings do not apply to animated obstacles.
            if fluid.use_animated_mesh:
                subcol.enabled = False
            subcol.label(text="Slip Type:")
            subcol.prop(fluid, "slip_type", text="")
            if fluid.slip_type == 'PARTIALSLIP':
                subcol.prop(fluid, "partial_slip_factor", slider=True, text="Amount")

            col.label(text="Impact:")
            col.prop(fluid, "impact_factor", text="Factor")

        elif fluid.type == 'INFLOW':
            split = layout.split()

            col = split.column()
            col.label(text="Volume Initialization:")
            col.prop(fluid, "volume_initialization", text="")
            col.prop(fluid, "use_animated_mesh")
            row = col.row()
            # Local coordinates are meaningless for animated meshes.
            row.active = not fluid.use_animated_mesh
            row.prop(fluid, "use_local_coords")

            col = split.column()
            col.label(text="Inflow Velocity:")
            col.prop(fluid, "inflow_velocity", text="")

        elif fluid.type == 'OUTFLOW':
            col = layout.column()
            col.label(text="Volume Initialization:")
            col.prop(fluid, "volume_initialization", text="")
            col.prop(fluid, "use_animated_mesh")

        elif fluid.type == 'PARTICLE':
            split = layout.split()

            col = split.column()
            col.label(text="Influence:")
            col.prop(fluid, "particle_influence", text="Size")
            col.prop(fluid, "alpha_influence", text="Alpha")

            col = split.column()
            col.label(text="Type:")
            col.prop(fluid, "use_drops")
            col.prop(fluid, "use_floats")
            col.prop(fluid, "show_tracer")

            # Cache path of the particle data.
            layout.prop(fluid, "filepath", text="")

        elif fluid.type == 'CONTROL':
            split = layout.split()

            col = split.column()
            col.label(text="")
            col.prop(fluid, "quality", slider=True)
            col.prop(fluid, "use_reverse_frames")

            col = split.column()
            col.label(text="Time:")
            sub = col.column(align=True)
            sub.prop(fluid, "start_time", text="Start")
            sub.prop(fluid, "end_time", text="End")

            split = layout.split()

            col = split.column()
            col.label(text="Attraction Force:")
            sub = col.column(align=True)
            sub.prop(fluid, "attraction_strength", text="Strength")
            sub.prop(fluid, "attraction_radius", text="Radius")

            col = split.column()
            col.label(text="Velocity Force:")
            sub = col.column(align=True)
            sub.prop(fluid, "velocity_strength", text="Strength")
            sub.prop(fluid, "velocity_radius", text="Radius")
class PHYSICS_PT_domain_gravity(PhysicButtonsPanel, Panel):
    """World settings of a fluid domain: gravity, scale, viscosity, optimization."""
    bl_label = "Fluid World"
    bl_options = {'DEFAULT_CLOSED'}
    COMPAT_ENGINES = {'BLENDER_RENDER'}

    @classmethod
    def poll(cls, context):
        md = context.fluid
        rd = context.scene.render
        # Only meaningful on DOMAIN-type fluid objects.
        return md and md.settings and (md.settings.type == 'DOMAIN') and rd.engine in cls.COMPAT_ENGINES

    def draw(self, context):
        layout = self.layout

        fluid = context.fluid.settings
        scene = context.scene

        split = layout.split()

        col = split.column()
        if scene.use_gravity:
            # Scene gravity overrides the local setting; show it read-only.
            col.label(text="Use Scene Gravity", icon='SCENE_DATA')
            sub = col.column()
            sub.enabled = False
            sub.prop(fluid, "gravity", text="")
        else:
            col.label(text="Gravity:")
            col.prop(fluid, "gravity", text="")

        if scene.unit_settings.system != 'NONE':
            # Scene units override the manual real-world size; read-only view.
            col.label(text="Use Scene Size Units", icon='SCENE_DATA')
            sub = col.column()
            sub.enabled = False
            sub.prop(fluid, "simulation_scale", text="Meters")
        else:
            col.label(text="Real World Size:")
            col.prop(fluid, "simulation_scale", text="Meters")

        col = split.column()
        col.label(text="Viscosity Presets:")
        sub = col.row(align=True)
        sub.menu("FLUID_MT_presets", text=bpy.types.FLUID_MT_presets.bl_label)
        sub.operator("fluid.preset_add", text="", icon='ZOOMIN')
        sub.operator("fluid.preset_add", text="", icon='ZOOMOUT').remove_active = True

        sub = col.column(align=True)
        sub.prop(fluid, "viscosity_base", text="Base")
        sub.prop(fluid, "viscosity_exponent", text="Exponent", slider=True)

        col.label(text="Optimization:")
        col.prop(fluid, "grid_levels", slider=True)
        col.prop(fluid, "compressibility", slider=True)
class PHYSICS_PT_domain_boundary(PhysicButtonsPanel, Panel):
    """Boundary settings of a fluid domain: slip behaviour and surface quality."""
    bl_label = "Fluid Boundary"
    bl_options = {'DEFAULT_CLOSED'}
    COMPAT_ENGINES = {'BLENDER_RENDER'}

    @classmethod
    def poll(cls, context):
        md = context.fluid
        rd = context.scene.render
        # Only meaningful on DOMAIN-type fluid objects.
        return md and md.settings and (md.settings.type == 'DOMAIN') and rd.engine in cls.COMPAT_ENGINES

    def draw(self, context):
        layout = self.layout

        fluid = context.fluid.settings

        split = layout.split()

        col = split.column()
        col.label(text="Slip Type:")
        col.prop(fluid, "slip_type", text="")
        # The slip amount only applies to the partial-slip mode.
        if fluid.slip_type == 'PARTIALSLIP':
            col.prop(fluid, "partial_slip_factor", slider=True, text="Amount")
        col.prop(fluid, "use_surface_noobs")

        col = split.column()
        col.label(text="Surface:")
        col.prop(fluid, "surface_smooth", text="Smoothing")
        col.prop(fluid, "surface_subdivisions", text="Subdivisions")
class PHYSICS_PT_domain_particles(PhysicButtonsPanel, Panel):
    """Particle settings of a fluid domain: tracer and generated particles."""
    bl_label = "Fluid Particles"
    bl_options = {'DEFAULT_CLOSED'}
    COMPAT_ENGINES = {'BLENDER_RENDER'}

    @classmethod
    def poll(cls, context):
        md = context.fluid
        rd = context.scene.render
        # Only meaningful on DOMAIN-type fluid objects.
        return md and md.settings and (md.settings.type == 'DOMAIN') and rd.engine in cls.COMPAT_ENGINES

    def draw(self, context):
        layout = self.layout

        fluid = context.fluid.settings

        row = layout.row()
        row.prop(fluid, "tracer_particles", text="Tracer")
        row.prop(fluid, "generate_particles", text="Generate")
# Re-register the UI classes when this file is executed from the text editor.
if __name__ == "__main__":  # only for live edit.
    bpy.utils.register_module(__name__)
| gpl-3.0 |
klmitch/glance | glance/db/sqlalchemy/migrate_repo/schema.py | 3 | 3251 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Various conveniences used for migration scripts
"""
from oslo_log import log as logging
import sqlalchemy.types
from glance.i18n import _LI
LOG = logging.getLogger(__name__)
# Factory helpers returning freshly-configured SQLAlchemy column types for
# migration scripts.  Written as named functions instead of lambda
# assignments (PEP 8 E731); public names and call signatures are unchanged.


def String(length):
    """Return a non-unicode ``String`` column type of the given length."""
    return sqlalchemy.types.String(
        length=length, convert_unicode=False,
        unicode_error=None, _warn_on_bytestring=False)


def Text():
    """Return an unbounded non-unicode ``Text`` column type."""
    return sqlalchemy.types.Text(
        length=None, convert_unicode=False,
        unicode_error=None, _warn_on_bytestring=False)


def Boolean():
    """Return a ``Boolean`` column type with a CHECK constraint."""
    return sqlalchemy.types.Boolean(create_constraint=True, name=None)


def DateTime():
    """Return a timezone-naive ``DateTime`` column type."""
    return sqlalchemy.types.DateTime(timezone=False)


def Integer():
    """Return an ``Integer`` column type."""
    return sqlalchemy.types.Integer()


def BigInteger():
    """Return a ``BigInteger`` column type."""
    return sqlalchemy.types.BigInteger()


def PickleType():
    """Return a ``PickleType`` column type."""
    return sqlalchemy.types.PickleType()


def Numeric():
    """Return a ``Numeric`` column type."""
    return sqlalchemy.types.Numeric()
def from_migration_import(module_name, fromlist):
    """Late-import helpers from a previous migration module.

    Migration modules begin with a number (``001_add_images_table``), which is
    not a valid Python identifier, so a normal ``import`` statement cannot
    reach them.  ``__import__`` is used instead and the requested attributes
    (usually table factories that can be late-bound to the caller's current
    metadata object) are pulled out by name.

    :param module_name: name of migration module to import from
                        (ex: 001_add_images_table)
    :param fromlist: list of items to import (ex: define_images_table)
    :returns: list of the requested attributes, in ``fromlist`` order

    Example Usage:

        (define_images_table,) = from_migration_import(
            '001_add_images_table', ['define_images_table'])

        images = define_images_table(meta)
    """
    qualified_name = 'glance.db.sqlalchemy.migrate_repo.versions.%s' % module_name
    module = __import__(qualified_name, globals(), locals(), fromlist, 0)
    return [getattr(module, name) for name in fromlist]
def create_tables(tables):
    """Create every table in *tables*, logging each one as it is created."""
    for tbl in tables:
        LOG.info(_LI("creating table %(table)s"), {'table': tbl})
        tbl.create()
def drop_tables(tables):
    """Drop every table in *tables*, logging each one as it is dropped."""
    for tbl in tables:
        LOG.info(_LI("dropping table %(table)s"), {'table': tbl})
        tbl.drop()
| apache-2.0 |
jlanga/exon_finder | tests/custom_assertions.py | 2 | 5708 | #!/usr/bin/env python3
"""
tests.custom_assertions.py: custom assertions for unit tests:
- assertEqualListOfSeqrecords: check if a list of seqrecords have:
- the same length
- the same id
- the same sequence
- assertEqualSpliceGraphs: check if two splice graphs:
- are isomorphic with nx.is_isomorphic
- each node have the same coordinates
- each edge have the same overlap
"""
from typing import List, Dict
import networkx as nx
import pandas as pd
from Bio.SeqRecord import SeqRecord
def check_same_keys(dict1: dict, dict2: dict) -> None:
    """Raise KeyError unless both dicts have exactly the same key set."""
    if set(dict1.keys()) == set(dict2.keys()):
        return
    raise KeyError("Keys differ: {keys1} {keys2}".format(
        keys1=dict1.keys(), keys2=dict2.keys()
    ))
def check_same_values(dict1: dict, dict2: dict) -> None:
    """Raise ValueError if any key of *dict1* maps to a different value in *dict2*."""
    for key in dict1:  # Check same values
        left = dict1[key]
        right = dict2[key]
        if left != right:
            raise ValueError("{key1}: {value1} != {key2} : {value2}".format(
                key1=key, value1=left, key2=key, value2=right
            ))
def check_same_dict(dict1: dict, dict2: dict) -> None:
    """Raise unless both dicts hold identical keys mapped to identical values."""
    check_same_keys(dict1, dict2)
    check_same_values(dict1, dict2)
def check_equal_node2coord(sg1: dict, sg2: dict) -> None:
    """Compare the per-node "coordinates" attribute of two splice graphs."""
    coords1 = nx.get_node_attributes(G=sg1, name="coordinates")
    coords2 = nx.get_node_attributes(G=sg2, name="coordinates")
    check_same_dict(coords1, coords2)
def check_equal_edge2overlap(sg1: dict, sg2: dict) -> None:
    """Compare the per-edge "overlaps" attribute of two splice graphs."""
    overlaps1 = nx.get_edge_attributes(G=sg1, name="overlaps")
    overlaps2 = nx.get_edge_attributes(G=sg2, name="overlaps")
    check_same_dict(overlaps1, overlaps2)
def check_equal_df_dict_values(dict1: dict, dict2: dict) -> None:
    """Raise ValueError unless the dicts map each key to array-equal data frames.

    Comparison relies on numpy.array_equal; see https://stackoverflow.com/a/33223893
    """
    from numpy import array_equal
    for key in dict1:
        df1 = dict1[key]
        df2 = dict2[key]
        if not array_equal(df1, df2):
            raise ValueError("df1 != df2:\n{df1}\n{df2}".format(df1=df1, df2=df2))
def check_equal_splice_graphs(sg1: dict, sg2: dict) -> None:
    """Check that two splice graphs are equivalent.

    Equivalent means:
    - isomorphic as graphs (nx.is_isomorphic),
    - node2coord ("coordinates" node attributes) are equal,
    - edge2overlap ("overlaps" edge attributes) are equal.

    :raises AssertionError: if the graphs are not isomorphic
    :raises KeyError, ValueError: if node/edge attributes differ
    """
    if not nx.is_isomorphic(sg1, sg2):
        # Bug fix: the exception was previously constructed but never raised,
        # so non-isomorphic graphs silently passed this check.
        raise AssertionError("splicegraph are not isomorphic")
    check_equal_node2coord(sg1, sg2)
    check_equal_edge2overlap(sg1, sg2)
def check_equal_dict_of_sg(dict1: dict, dict2: dict) -> None:
    """Check that both dicts map the same keys to equal splice graphs."""
    check_same_keys(dict1, dict2)
    for key in dict1:
        check_equal_splice_graphs(dict1[key], dict2[key])
def check_equal_length(iter1: List, iter2: List) -> None:
    """Raise AssertionError unless both iterables have the same length."""
    len1, len2 = len(iter1), len(iter2)
    if len1 != len2:
        raise AssertionError('Lengths differ: {len_1} != {len_2}'.format(
            len_1=len1, len_2=len2
        ))
def check_equal_seqrecrods(seqrecord1: SeqRecord, seqrecord2: SeqRecord) -> None:
    """Raise AssertionError unless both records share the same id and seq."""
    same_record = (seqrecord1.id == seqrecord2.id
                   and seqrecord1.seq == seqrecord2.seq)
    if not same_record:
        raise AssertionError(
            'Records differ: {id1}: {seq1} {id2}: {seq2}'.format(
                id1=seqrecord1.id, seq1=seqrecord1.seq, id2=seqrecord2.id, seq2=seqrecord2.seq
            )
        )
def check_equal_list_seqrecords(iter1: List[SeqRecord], iter2: List[SeqRecord]) -> None:
    """Compare two lists of SeqRecords element by element.

    Indexes into iter2 directly, so a shorter iter2 raises IndexError
    (callers are expected to run check_equal_length first).
    """
    for idx in range(len(iter1)):
        check_equal_seqrecrods(iter1[idx], iter2[idx])
class CustomAssertions:
    """
    Custom assertions not covered in unittest, meant to be mixed into a
    TestCase subclass:
    - assertEqualDict
    - assertEqualListOfSeqrecords
    - assertEqualSpliceGraphs
    - assertEqualDictOfDF
    - assertEqualDictOfSpliceGraphs
    """

    @classmethod
    def assertEqualDict(self, dict1: dict, dict2: dict) -> None:
        """Check if two dicts are equal (values are compared with ==)"""
        # pylint: disable=invalid-name, bad-classmethod-argument
        check_same_dict(dict1, dict2)

    @classmethod
    def assertEqualListOfSeqrecords(
            self, records1: List[SeqRecord], records2: List[SeqRecord]) -> None:
        """
        Check if each element of list_of_seqrecords1 is exactly equal to each one of
        list_of_seqrecords2.
        """
        # pylint: disable=invalid-name, bad-classmethod-argument
        check_equal_length(records1, records2)
        check_equal_list_seqrecords(records1, records2)

    @classmethod
    def assertEqualSpliceGraphs(self, sg1: dict, sg2: dict) -> None:
        """Check if two splice graph are equal: isomorphic with equal attributes."""
        # pylint: disable=invalid-name,bad-classmethod-argument
        check_equal_splice_graphs(sg1, sg2)

    @classmethod
    def assertEqualDictOfDF(
            self, dict1: Dict[str, pd.DataFrame], dict2: Dict[str, pd.DataFrame]) -> None:
        """Check if two dicts of pd.DataFrame are equal"""
        # pylint: disable=invalid-name,bad-classmethod-argument
        check_same_keys(dict1, dict2)
        check_equal_df_dict_values(dict1, dict2)

    @classmethod
    def assertEqualDictOfSpliceGraphs(self, dict1: dict, dict2: dict) -> None:
        """Check if two dicts of nx.DiGraph and some data attached to nodes and edges are equal"""
        # pylint: disable=invalid-name, bad-classmethod-argument
        check_equal_dict_of_sg(dict1, dict2)
| mit |
kosz85/django | tests/staticfiles_tests/test_management.py | 33 | 18449 | import codecs
import datetime
import os
import shutil
import tempfile
import unittest
from io import StringIO
from unittest import mock
from admin_scripts.tests import AdminScriptTestCase
from django.conf import settings
from django.contrib.staticfiles import storage
from django.contrib.staticfiles.management.commands import collectstatic
from django.core.exceptions import ImproperlyConfigured
from django.core.management import call_command
from django.test import override_settings
from django.test.utils import extend_sys_path
from django.utils import timezone
from django.utils._os import symlinks_supported
from django.utils.functional import empty
from .cases import CollectionTestCase, StaticFilesTestCase, TestDefaults
from .settings import TEST_ROOT, TEST_SETTINGS
from .storage import DummyStorage
class TestNoFilesCreated:
    """Mixin asserting that collectstatic left STATIC_ROOT empty."""
    def test_no_files_created(self):
        """
        Make sure no files were created in the destination directory.
        """
        self.assertEqual(os.listdir(settings.STATIC_ROOT), [])
class TestFindStatic(TestDefaults, CollectionTestCase):
    """
    Test ``findstatic`` management command.
    """
    def _get_file(self, filepath):
        # Resolve *filepath* through findstatic and return its text content.
        path = call_command('findstatic', filepath, all=False, verbosity=0, stdout=StringIO())
        with codecs.open(path, "r", "utf-8") as f:
            return f.read()

    def test_all_files(self):
        """
        findstatic returns all candidate files if run without --first and -v1.
        """
        result = call_command('findstatic', 'test/file.txt', verbosity=1, stdout=StringIO())
        lines = [l.strip() for l in result.split('\n')]
        self.assertEqual(len(lines), 3)  # three because there is also the "Found <file> here" line
        self.assertIn('project', lines[1])
        self.assertIn('apps', lines[2])

    def test_all_files_less_verbose(self):
        """
        findstatic returns all candidate files if run without --first and -v0.
        """
        result = call_command('findstatic', 'test/file.txt', verbosity=0, stdout=StringIO())
        lines = [l.strip() for l in result.split('\n')]
        self.assertEqual(len(lines), 2)
        self.assertIn('project', lines[0])
        self.assertIn('apps', lines[1])

    def test_all_files_more_verbose(self):
        """
        findstatic returns all candidate files if run without --first and -v2.
        Also, test that findstatic returns the searched locations with -v2.
        """
        result = call_command('findstatic', 'test/file.txt', verbosity=2, stdout=StringIO())
        lines = [l.strip() for l in result.split('\n')]
        self.assertIn('project', lines[1])
        self.assertIn('apps', lines[2])
        self.assertIn("Looking in the following locations:", lines[3])
        searched_locations = ', '.join(lines[4:])
        # AppDirectoriesFinder searched locations
        self.assertIn(os.path.join('staticfiles_tests', 'apps', 'test', 'static'), searched_locations)
        self.assertIn(os.path.join('staticfiles_tests', 'apps', 'no_label', 'static'), searched_locations)
        # FileSystemFinder searched locations
        self.assertIn(TEST_SETTINGS['STATICFILES_DIRS'][1][1], searched_locations)
        self.assertIn(TEST_SETTINGS['STATICFILES_DIRS'][0], searched_locations)
        # DefaultStorageFinder searched locations
        self.assertIn(
            os.path.join('staticfiles_tests', 'project', 'site_media', 'media'),
            searched_locations
        )
class TestConfiguration(StaticFilesTestCase):
    """Sanity checks on collectstatic's configuration handling."""
    def test_location_empty(self):
        # collectstatic must refuse to run when STATIC_ROOT is '' or None.
        msg = 'without having set the STATIC_ROOT setting to a filesystem path'
        err = StringIO()
        for root in ['', None]:
            with override_settings(STATIC_ROOT=root):
                with self.assertRaisesMessage(ImproperlyConfigured, msg):
                    call_command('collectstatic', interactive=False, verbosity=0, stderr=err)

    def test_local_storage_detection_helper(self):
        # Command.is_local_storage() must reflect the *current* storage, whether
        # it comes from the STATICFILES_STORAGE setting or is swapped in directly.
        staticfiles_storage = storage.staticfiles_storage
        try:
            storage.staticfiles_storage._wrapped = empty
            with self.settings(STATICFILES_STORAGE='django.contrib.staticfiles.storage.StaticFilesStorage'):
                command = collectstatic.Command()
                self.assertTrue(command.is_local_storage())

            storage.staticfiles_storage._wrapped = empty
            with self.settings(STATICFILES_STORAGE='staticfiles_tests.storage.DummyStorage'):
                command = collectstatic.Command()
                self.assertFalse(command.is_local_storage())

            collectstatic.staticfiles_storage = storage.FileSystemStorage()
            command = collectstatic.Command()
            self.assertTrue(command.is_local_storage())

            collectstatic.staticfiles_storage = DummyStorage()
            command = collectstatic.Command()
            self.assertFalse(command.is_local_storage())
        finally:
            # Restore the module-level storage objects for subsequent tests.
            staticfiles_storage._wrapped = empty
            collectstatic.staticfiles_storage = staticfiles_storage
            storage.staticfiles_storage = staticfiles_storage
class TestCollectionHelpSubcommand(AdminScriptTestCase):
    """Help output must be reachable even with incomplete settings."""
    @override_settings(STATIC_ROOT=None)
    def test_missing_settings_dont_prevent_help(self):
        """
        Even if the STATIC_ROOT setting is not set, one can still call the
        `manage.py help collectstatic` command.
        """
        self.write_settings('settings.py', apps=['django.contrib.staticfiles'])
        out, err = self.run_manage(['help', 'collectstatic'])
        self.assertNoOutput(err)
class TestCollection(TestDefaults, CollectionTestCase):
    """
    Test ``collectstatic`` management command.
    """
    def test_ignore(self):
        """
        -i patterns are ignored.
        """
        self.assertFileNotFound('test/test.ignoreme')

    def test_common_ignore_patterns(self):
        """
        Common ignore patterns (*~, .*, CVS) are ignored.
        """
        self.assertFileNotFound('test/.hidden')
        self.assertFileNotFound('test/backup~')
        self.assertFileNotFound('test/CVS')
class TestCollectionClear(CollectionTestCase):
    """
    Test the ``--clear`` option of the ``collectstatic`` management command.
    """
    def run_collectstatic(self, **kwargs):
        # Plant a stale file in STATIC_ROOT first; --clear should remove it.
        clear_filepath = os.path.join(settings.STATIC_ROOT, 'cleared.txt')
        with open(clear_filepath, 'w') as f:
            f.write('should be cleared')
        super().run_collectstatic(clear=True)

    def test_cleared_not_found(self):
        self.assertFileNotFound('cleared.txt')

    def test_dir_not_exists(self, **kwargs):
        # --clear must cope with STATIC_ROOT not existing at all.
        shutil.rmtree(settings.STATIC_ROOT)
        super().run_collectstatic(clear=True)

    @override_settings(STATICFILES_STORAGE='staticfiles_tests.storage.PathNotImplementedStorage')
    def test_handle_path_notimplemented(self):
        # Clearing must degrade gracefully when the storage lacks path().
        self.run_collectstatic()
        self.assertFileNotFound('cleared.txt')
class TestInteractiveMessages(CollectionTestCase):
    """Warnings emitted by interactive collectstatic runs."""
    overwrite_warning_msg = "This will overwrite existing files!"
    delete_warning_msg = "This will DELETE ALL FILES in this location!"
    files_copied_msg = "static files copied"

    @staticmethod
    def mock_input(stdout):
        # Build an input() replacement that records the prompt and answers "yes".
        def _input(msg):
            stdout.write(msg)
            return 'yes'
        return _input

    def test_warning_when_clearing_staticdir(self):
        stdout = StringIO()
        self.run_collectstatic()
        with mock.patch('builtins.input', side_effect=self.mock_input(stdout)):
            call_command('collectstatic', interactive=True, clear=True, stdout=stdout)

        output = stdout.getvalue()
        self.assertNotIn(self.overwrite_warning_msg, output)
        self.assertIn(self.delete_warning_msg, output)

    def test_warning_when_overwriting_files_in_staticdir(self):
        stdout = StringIO()
        self.run_collectstatic()
        with mock.patch('builtins.input', side_effect=self.mock_input(stdout)):
            call_command('collectstatic', interactive=True, stdout=stdout)
        output = stdout.getvalue()
        self.assertIn(self.overwrite_warning_msg, output)
        self.assertNotIn(self.delete_warning_msg, output)

    def test_no_warning_when_staticdir_does_not_exist(self):
        stdout = StringIO()
        shutil.rmtree(settings.STATIC_ROOT)
        call_command('collectstatic', interactive=True, stdout=stdout)
        output = stdout.getvalue()
        self.assertNotIn(self.overwrite_warning_msg, output)
        self.assertNotIn(self.delete_warning_msg, output)
        self.assertIn(self.files_copied_msg, output)

    def test_no_warning_for_empty_staticdir(self):
        stdout = StringIO()
        with tempfile.TemporaryDirectory(prefix='collectstatic_empty_staticdir_test') as static_dir:
            with override_settings(STATIC_ROOT=static_dir):
                call_command('collectstatic', interactive=True, stdout=stdout)
        output = stdout.getvalue()
        self.assertNotIn(self.overwrite_warning_msg, output)
        self.assertNotIn(self.delete_warning_msg, output)
        self.assertIn(self.files_copied_msg, output)
class TestCollectionExcludeNoDefaultIgnore(TestDefaults, CollectionTestCase):
    """
    Test ``--exclude-dirs`` and ``--no-default-ignore`` options of the
    ``collectstatic`` management command.
    """
    def run_collectstatic(self):
        super().run_collectstatic(use_default_ignore_patterns=False)

    def test_no_common_ignore_patterns(self):
        """
        With --no-default-ignore, common ignore patterns (*~, .*, CVS)
        are not ignored.
        """
        self.assertFileContains('test/.hidden', 'should be ignored')
        self.assertFileContains('test/backup~', 'should be ignored')
        self.assertFileContains('test/CVS', 'should be ignored')
@override_settings(INSTALLED_APPS=[
    'staticfiles_tests.apps.staticfiles_config.IgnorePatternsAppConfig',
    'staticfiles_tests.apps.test',
])
class TestCollectionCustomIgnorePatterns(CollectionTestCase):
    """App-configured ignore_patterns are honored by collectstatic."""
    def test_custom_ignore_patterns(self):
        """
        A custom ignore_patterns list, ['*.css'] in this case, can be specified
        in an AppConfig definition.
        """
        self.assertFileNotFound('test/nonascii.css')
        self.assertFileContains('test/.hidden', 'should be ignored')
class TestCollectionDryRun(TestNoFilesCreated, CollectionTestCase):
    """
    Test ``--dry-run`` option for ``collectstatic`` management command.
    """
    def run_collectstatic(self):
        super().run_collectstatic(dry_run=True)
class TestCollectionFilesOverride(CollectionTestCase):
    """
    Test overriding duplicated files by ``collectstatic`` management command.
    Check for proper handling of apps order in installed apps even if file modification
    dates are in different order:
        'staticfiles_test_app',
        'staticfiles_tests.apps.no_label',
    """
    def setUp(self):
        self.temp_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, self.temp_dir)

        # get modification and access times for no_label/static/file2.txt
        self.orig_path = os.path.join(TEST_ROOT, 'apps', 'no_label', 'static', 'file2.txt')
        self.orig_mtime = os.path.getmtime(self.orig_path)
        self.orig_atime = os.path.getatime(self.orig_path)

        # prepare duplicate of file2.txt from a temporary app
        # this file will have modification time older than no_label/static/file2.txt
        # anyway it should be taken to STATIC_ROOT because the temporary app is before
        # 'no_label' app in installed apps
        self.temp_app_path = os.path.join(self.temp_dir, 'staticfiles_test_app')
        self.testfile_path = os.path.join(self.temp_app_path, 'static', 'file2.txt')

        os.makedirs(self.temp_app_path)
        with open(os.path.join(self.temp_app_path, '__init__.py'), 'w+'):
            pass

        os.makedirs(os.path.dirname(self.testfile_path))
        with open(self.testfile_path, 'w+') as f:
            f.write('duplicate of file2.txt')

        os.utime(self.testfile_path, (self.orig_atime - 1, self.orig_mtime - 1))

        # Register the temporary app ahead of 'no_label' so it wins collection.
        self.settings_with_test_app = self.modify_settings(
            INSTALLED_APPS={'prepend': 'staticfiles_test_app'})
        with extend_sys_path(self.temp_dir):
            self.settings_with_test_app.enable()
        super().setUp()

    def tearDown(self):
        super().tearDown()
        self.settings_with_test_app.disable()

    def test_ordering_override(self):
        """
        Test if collectstatic takes files in proper order
        """
        self.assertFileContains('file2.txt', 'duplicate of file2.txt')

        # run collectstatic again
        self.run_collectstatic()
        self.assertFileContains('file2.txt', 'duplicate of file2.txt')
# The collectstatic test suite already has conflicting files since both
# project/test/file.txt and apps/test/static/test/file.txt are collected. To
# properly test for the warning not happening unless we tell it to explicitly,
# we remove the project directory and will add back a conflicting file later.
@override_settings(STATICFILES_DIRS=[])
class TestCollectionOverwriteWarning(CollectionTestCase):
    """
    Test warning in ``collectstatic`` output when a file is skipped because a
    previous file was already written to the same path.
    """
    # If this string is in the collectstatic output, it means the warning we're
    # looking for was emitted.
    warning_string = 'Found another file'

    def _collectstatic_output(self, **kwargs):
        """
        Run collectstatic, and capture and return the output. We want to run
        the command at highest verbosity, which is why we can't
        just call e.g. BaseCollectionTestCase.run_collectstatic()
        """
        out = StringIO()
        call_command('collectstatic', interactive=False, verbosity=3, stdout=out, **kwargs)
        return out.getvalue()

    def test_no_warning(self):
        """
        There isn't a warning if there isn't a duplicate destination.
        """
        output = self._collectstatic_output(clear=True)
        self.assertNotIn(self.warning_string, output)

    def test_warning(self):
        """
        There is a warning when there are duplicate destinations.
        """
        with tempfile.TemporaryDirectory() as static_dir:
            duplicate = os.path.join(static_dir, 'test', 'file.txt')
            os.mkdir(os.path.dirname(duplicate))
            with open(duplicate, 'w+') as f:
                f.write('duplicate of file.txt')

            with self.settings(STATICFILES_DIRS=[static_dir]):
                output = self._collectstatic_output(clear=True)
            self.assertIn(self.warning_string, output)

            os.remove(duplicate)

            # Make sure the warning went away again.
            with self.settings(STATICFILES_DIRS=[static_dir]):
                output = self._collectstatic_output(clear=True)
            self.assertNotIn(self.warning_string, output)
@override_settings(STATICFILES_STORAGE='staticfiles_tests.storage.DummyStorage')
class TestCollectionNonLocalStorage(TestNoFilesCreated, CollectionTestCase):
    """
    Tests for a Storage that implements get_modified_time() but not path()
    (#15035).
    """
    def test_storage_properties(self):
        # Properties of the Storage as described in the ticket.
        storage = DummyStorage()
        self.assertEqual(storage.get_modified_time('name'), datetime.datetime(1970, 1, 1, tzinfo=timezone.utc))
        with self.assertRaisesMessage(NotImplementedError, "This backend doesn't support absolute paths."):
            storage.path('name')
class TestCollectionNeverCopyStorage(CollectionTestCase):
    """Remote storages reporting future mtimes cause files to be skipped."""
    @override_settings(STATICFILES_STORAGE='staticfiles_tests.storage.NeverCopyRemoteStorage')
    def test_skips_newer_files_in_remote_storage(self):
        """
        collectstatic skips newer files in a remote storage.
        run_collectstatic() in setUp() copies the static files, then files are
        always skipped after NeverCopyRemoteStorage is activated since
        NeverCopyRemoteStorage.get_modified_time() returns a datetime in the
        future to simulate an unmodified file.
        """
        stdout = StringIO()
        self.run_collectstatic(stdout=stdout, verbosity=2)
        output = stdout.getvalue()
        self.assertIn("Skipping 'test.txt' (not modified)", output)
@unittest.skipUnless(symlinks_supported(), "Must be able to symlink to run this test.")
class TestCollectionLinks(TestDefaults, CollectionTestCase):
    """
    Test ``--link`` option for ``collectstatic`` management command.

    Note that by inheriting ``TestDefaults`` we repeat all
    the standard file resolving tests here, to make sure using
    ``--link`` does not change the file-selection semantics.
    """
    def run_collectstatic(self, clear=False, link=True, **kwargs):
        super().run_collectstatic(link=link, clear=clear, **kwargs)

    def test_links_created(self):
        """
        With ``--link``, symbolic links are created.
        """
        self.assertTrue(os.path.islink(os.path.join(settings.STATIC_ROOT, 'test.txt')))

    def test_broken_symlink(self):
        """
        Test broken symlink gets deleted.
        """
        path = os.path.join(settings.STATIC_ROOT, 'test.txt')
        os.unlink(path)
        self.run_collectstatic()
        self.assertTrue(os.path.islink(path))

    def test_symlinks_and_files_replaced(self):
        """
        Running collectstatic in non-symlink mode replaces symlinks with files,
        while symlink mode replaces files with symlinks.
        """
        path = os.path.join(settings.STATIC_ROOT, 'test.txt')
        self.assertTrue(os.path.islink(path))
        self.run_collectstatic(link=False)
        self.assertFalse(os.path.islink(path))
        self.run_collectstatic(link=True)
        self.assertTrue(os.path.islink(path))

    def test_clear_broken_symlink(self):
        """
        With ``--clear``, broken symbolic links are deleted.
        """
        nonexistent_file_path = os.path.join(settings.STATIC_ROOT, 'nonexistent.txt')
        broken_symlink_path = os.path.join(settings.STATIC_ROOT, 'symlink.txt')
        os.symlink(nonexistent_file_path, broken_symlink_path)
        self.run_collectstatic(clear=True)
        self.assertFalse(os.path.lexists(broken_symlink_path))
| bsd-3-clause |
nullie/python-hemi | tests/test_interoperability.py | 2 | 1426 | # -*- coding: utf-8 -*-
import hemi
def test_pass_back():
    """JS objects and functions survive a round trip through Python."""
    ctx = hemi.Context()

    js_obj = ctx.eval("({1: 3})")
    js_func = ctx.eval("(function(o) { return o[1] + 3 })")
    assert js_func(js_obj) == 6

    js_wrapper = ctx.eval("(function(f) { return f({1: 5}) + 2 })")
    assert js_wrapper(js_func) == 10
def test_modification():
    """Attribute and item writes from Python are visible from JS and back."""
    ctx = hemi.Context()
    wrapped = ctx.eval("({1: 2})")
    wrapped.foo = 'bar'

    getter = ctx.eval("(function(d, k) { return d[k] })")
    assert getter(wrapped, 'foo') == 'bar'

    wrapped['bar'] = 'baz'
    assert getter(wrapped, 'bar') == 'baz'
    assert wrapped['bar'] == 'baz'
def test_object():
    """Attributes/items can be set and deleted; missing keys read as undefined."""
    ctx = hemi.Context()
    obj = ctx.Object()

    obj.foo = 'bar'
    assert obj.foo == 'bar'
    del obj.foo
    del obj.qux  # deleting a missing attribute is a no-op

    obj['bar'] = 'baz'
    del obj['bar']
    del obj['qux']  # deleting a missing item is a no-op

    assert obj.qux is hemi.undefined
    assert obj['qux'] is hemi.undefined
def test_function():
    """A Python callable wrapped as a JS Function is invocable."""
    ctx = hemi.Context()

    def py_callable(this, *args):
        return 'ok'

    wrapped = ctx.Function(py_callable)
    assert wrapped() == 'ok'
def test_unicode():
    """Non-ASCII strings round-trip between Python and JS intact."""
    ctx = hemi.Context()
    assert ctx.eval(u'"привет"') == u'привет'
    ctx.eval("function f(arg) { return arg }")
    assert ctx.locals.f(u'привет') == u'привет'
def test_longint():
context = hemi.Context()
context.eval("function f(arg) { return arg + 1 }")
assert context.locals.f(213L) == 214L
| bsd-3-clause |
JuBzzz/PyImageScripts | Scripts/labeler.py | 1 | 13464 | import tkinter as tk
from tkinter import ttk
from PIL import ImageFont, ImageDraw, Image
from matplotlib import font_manager
from ._helper import *
import os
# Font families probed, in order, when pre-selecting the font combobox.
DEFAULT_FONTS = ["Arial", "Helvetica", "Times New Roman", "Times",
                 "Courier New", "Verdana"]
# Bounds shared by the font-size and label-thickness spinboxes.
MIN_FONT = 1
MAX_FONT = 10000
# Opaque white RGBA, used as the fill for the enlarged canvas.
WHITE = (255, 255, 255, 255)
def _validate_to_number_range(StringVar, from_=0, to=255):
text = StringVar.get()
if len(text):
num = int(text)
if num < from_:
StringVar.set(str(from_))
elif num > to:
StringVar.set(str(to))
else:
StringVar.set(str(from_))
return StringVar.get()
def get_fonts_from_dir(path=''):
    """Map font family names to .ttf file paths found under *path*.

    Returns an empty dict when *path* is not an existing directory.
    """
    fonts = {}
    if os.path.isdir(path):
        ttf_paths = font_manager.list_fonts(path, ['ttf'])
        for entry in font_manager.createFontList(ttf_paths):
            fonts[entry.name] = entry.fname
    return fonts
def make_label(image, bg, label_position, label_thickness):
    """Create the blank RGBA strip for one side of *image*.

    Horizontal positions span the image width; vertical positions span its
    height.  An unrecognized position yields None.
    """
    if label_position in ("top", "bottom"):
        return Image.new('RGBA', (image.width, label_thickness), bg)
    if label_position in ("left", "right"):
        return Image.new('RGBA', (label_thickness, image.height), bg)
def draw_text_on_label(label, text, font, text_color, v_align, h_align):
    """Render *text* on *label* aligned per v_align/h_align; return the draw.

    Centering uses the original floor-divide arithmetic
    (``width // 2 - text // 2``) so pixel placement is unchanged.
    """
    draw = ImageDraw.Draw(label)
    text_w, text_h = draw.textsize(text, font=font)

    if h_align == "left":
        x = 0
    if h_align == "center":
        x = label.width // 2 - text_w // 2
    if h_align == "right":
        x = label.width - text_w

    if v_align == "top":
        y = 0
    if v_align == "center":
        y = label.height // 2 - text_h // 2
    if v_align == "bottom":
        y = label.height - text_h

    draw.text((x, y), text=text, font=font, fill=text_color)
    return draw
def compute_label_box_area(image, label, label_position):
    """Return the (left, upper, right, lower) box where *label* pastes onto *image*."""
    if label_position == "top":
        box = (0, 0, image.width, label.height)
    if label_position == "bottom":
        box = (0, image.height - label.height, image.width, image.height)
    if label_position == "left":
        box = (0, 0, label.width, image.height)
    if label_position == "right":
        box = (image.width - label.width, 0, image.width, image.height)
    return box
def increase_image_canvas(image, label, label_position):
    """Return *image* pasted onto a canvas enlarged to make room for *label*.

    The original pixels shift right/down for "left"/"top" labels and stay at
    the origin for "bottom"/"right"; the new area is filled with WHITE.
    """
    if label_position == "top":
        new_size = (image.width, image.height + label.height)
        paste_box = (0, label.height, image.width, image.height + label.height)
    if label_position == "left":
        new_size = (image.width + label.width, image.height)
        paste_box = (label.width, 0, image.width + label.width, image.height)
    if label_position == "bottom":
        new_size = (image.width, image.height + label.height)
        paste_box = (0, 0, image.width, image.height)
    if label_position == "right":
        new_size = (image.width + label.width, image.height)
        paste_box = (0, 0, image.width, image.height)

    enlarged = Image.new(image.mode, new_size, WHITE)
    enlarged.paste(image, paste_box)
    return enlarged
def run(image, text, font_size, font_file, text_color, bg, v_align, h_align,
        label_thickness, label_position, label_over_image):
    """Label *image* with *text* and return the resulting image.

    When *label_over_image* is falsy, the canvas is enlarged first so the
    label strip does not cover any original pixels.
    """
    label = make_label(image, bg, label_position, label_thickness)
    typeface = ImageFont.truetype(font_file, font_size)
    draw_text_on_label(label, text, typeface, text_color, v_align, h_align)

    if not label_over_image:
        image = increase_image_canvas(image, label, label_position)

    label_box = compute_label_box_area(image, label, label_position)
    image.paste(label, label_box, label)
    return image
class widgets(tk.Frame):
def __init__(self, parent):
super(widgets, self).__init__(parent, relief=RELIEF, bd=BD, padx=PAD,
pady=PAD, height=HEIGHT, width=WIDTH)
val_digit = (parent.register(digits_validation),
'%d', '%i', '%P', '%s', '%S', '%v', '%V', '%W')
text_lbl = tk.Label(self, text="Text:")
text_lbl.grid(column=0, row=0, columnspan=12)
self.text_txt = tk.Text(self, height=3, width=35)
self.text_txt.grid(column=0, row=1, columnspan=12)
font_dir_lbl = tk.Label(self, text="Font\ndirectory: ")
font_dir_lbl.grid(column=0, row=2, columnspan=3)
self.font_directory = tk.StringVar(self)
font_dir_ent = tk.Entry(self, textvariable=self.font_directory)
font_dir_ent.grid(column=3, row=2, columnspan=6)
font_reset_btn = tk.Button(self, text="RESET",
command=self._load_default_fonts)
font_reset_btn.grid(column=8, row=2, columnspan=2)
font_dir_btn = tk.Button(self, text="OK", command=self._load_fonts)
font_dir_btn.grid(column=10, row=2, columnspan=2)
font_size_lbl = tk.Label(self, text="Size:")
font_size_lbl.grid(column=0, row=3, columnspan=2)
self.font_size = tk.StringVar(self, value="15")
font_size_ent = tk.Spinbox(self, textvariable=self.font_size, width=3,
from_=MIN_FONT, to=MAX_FONT, validate='key',
validatecommand=val_digit)
font_size_ent.grid(column=2, row=3, columnspan=2)
font_family_lbl = tk.Label(self, text="Font\nfamily: ")
font_family_lbl.grid(column=4, row=3, columnspan=3)
self.font_family_cmb = ttk.Combobox(self, state="readonly", width=16)
self.font_family_cmb.grid(column=7, row=3, columnspan=5)
self._load_default_fonts()
text_color_lbl = tk.Label(self, text="Text color:")
text_color_lbl.grid(column=0, row=4, columnspan=3)
text_color_frm = tk.Frame(self)
text_color_frm.grid(column=3, row=4, columnspan=9)
self.TEXT_RGBA = [tk.StringVar(self, value="0"),
tk.StringVar(self, value="0"),
tk.StringVar(self, value="0"),
tk.StringVar(self, value="255")]
text_red_lbl = tk.Label(text_color_frm, text="R:")
text_red_lbl.grid(column=0, row=0)
text_red = tk.Spinbox(text_color_frm, textvariable=self.TEXT_RGBA[0],
width=3, from_=0, to=255, validate='key',
validatecommand=val_digit)
text_red.grid(column=1, row=0)
text_green_lbl = tk.Label(text_color_frm, text="G:")
text_green_lbl.grid(column=2, row=0)
text_green = tk.Spinbox(text_color_frm, textvariable=self.TEXT_RGBA[1],
width=3, from_=0, to=255, validate='key',
validatecommand=val_digit)
text_green.grid(column=3, row=0)
text_blue_lbl = tk.Label(text_color_frm, text="B:")
text_blue_lbl.grid(column=4, row=0)
text_blue = tk.Spinbox(text_color_frm, textvariable=self.TEXT_RGBA[2],
width=3, from_=0, to=255, validate='key',
validatecommand=val_digit)
text_blue.grid(column=5, row=0)
text_alpha_lbl = tk.Label(text_color_frm, text="A:")
text_alpha_lbl.grid(column=6, row=0)
text_alpha = tk.Spinbox(text_color_frm, textvariable=self.TEXT_RGBA[3],
width=3, from_=0, to=255, validate='key',
validatecommand=val_digit)
text_alpha.grid(column=7, row=0)
v_align_lbl = tk.Label(self, text="Vertical\nalign:")
v_align_lbl.grid(column=0, row=5, columnspan=3)
self.v_align_cmb = ttk.Combobox(self, width=6, state="readonly",
values=["top", "center", "bottom"])
self.v_align_cmb.grid(column=3, row=5, columnspan=3)
self.v_align_cmb.set("top")
h_align_lbl = tk.Label(self, text="Horizontal\nalign:")
h_align_lbl.grid(column=6, row=5, columnspan=3)
self.h_align_cmb = ttk.Combobox(self, width=6, state="readonly",
values=["left", "center", "right"])
self.h_align_cmb.grid(column=9, row=5, columnspan=3)
self.h_align_cmb.set("left")
bg_color_lbl = tk.Label(self, text="Background\ncolor:")
bg_color_lbl.grid(column=0, row=6, columnspan=3)
bg_color_frm = tk.Frame(self)
bg_color_frm.grid(column=3, row=6, columnspan=9)
self.BG_RGBA = [tk.StringVar(self, value="255"),
tk.StringVar(self, value="255"),
tk.StringVar(self, value="255"),
tk.StringVar(self, value="255")]
bg_red_lbl = tk.Label(bg_color_frm, text="R:")
bg_red_lbl.grid(column=0, row=0)
bg_red = tk.Spinbox(bg_color_frm, textvariable=self.BG_RGBA[0],
width=3, from_=0, to=255, validate='key',
validatecommand=val_digit)
bg_red.grid(column=1, row=0)
bg_green_lbl = tk.Label(bg_color_frm, text="G:")
bg_green_lbl.grid(column=2, row=0)
bg_green = tk.Spinbox(bg_color_frm, textvariable=self.BG_RGBA[1],
width=3, from_=0, to=255, validate='key',
validatecommand=val_digit)
bg_green.grid(column=3, row=0)
bg_blue_lbl = tk.Label(bg_color_frm, text="B:")
bg_blue_lbl.grid(column=4, row=0)
bg_blue = tk.Spinbox(bg_color_frm, textvariable=self.BG_RGBA[2],
width=3, from_=0, to=255, validate='key',
validatecommand=val_digit)
bg_blue.grid(column=5, row=0)
bg_alpha_lbl = tk.Label(bg_color_frm, text="A:")
bg_alpha_lbl.grid(column=6, row=0)
bg_alpha = tk.Spinbox(bg_color_frm, textvariable=self.BG_RGBA[3],
width=3, from_=0, to=255, validate='key',
validatecommand=val_digit)
bg_alpha.grid(column=7, row=0)
label_thickness_lbl = tk.Label(self, text="Label\nthickness:")
label_thickness_lbl.grid(column=0, row=7, columnspan=3)
self.label_thickness = tk.StringVar(self, value="25")
label_thick_ent = tk.Spinbox(self, textvariable=self.label_thickness,
width=3, from_=MIN_FONT, to=MAX_FONT,
validate='key', validatecommand=val_digit)
label_thick_ent.grid(column=3, row=7, columnspan=2)
label_position_lbl = tk.Label(self, text="Label\nposition:")
label_position_lbl.grid(column=5, row=7, columnspan=3)
label_position_list = ["top", "bottom", "left", "right"]
self.label_position_cmb = ttk.Combobox(self, state="readonly", width=8,
values=label_position_list)
self.label_position_cmb.grid(column=8, row=7, columnspan=4)
self.label_position_cmb.set("bottom")
self.label_over_image = tk.IntVar()
label_over_image_chk = tk.Checkbutton(self, text="Label over\nimage",
variable=self.label_over_image)
label_over_image_chk.grid(column=0, row=8, columnspan=3)
def _load_fonts(self):
self.font_dict = get_fonts_from_dir(self.font_directory.get())
sorted_font_list = sorted(list(self.font_dict.keys()))
self.font_family_cmb['values'] = sorted_font_list
available_fonts = self.font_dict.keys()
for def_font in DEFAULT_FONTS:
current_font = self.font_family_cmb.get()
if current_font == "" and def_font in available_fonts:
self.font_family_cmb.set(def_font)
else:
if current_font == "":
self.font_family_cmb.set(list(available_fonts)[0])
    def _load_default_fonts(self):
        """Point the font directory at the system font folder and rescan.

        NOTE(review): font_manager.win32FontDirectory() suggests this is
        Windows-only -- confirm behaviour on other platforms.
        """
        self.font_directory.set(font_manager.win32FontDirectory())
        self._load_fonts()
def get_args(self):
text = self.text_txt.get("1.0",'end-1c')
font_size = int(_validate_to_number_range(self.font_size,
MIN_FONT, MAX_FONT))
font_file = self.font_dict[self.font_family_cmb.get()]
text_color = [int(_validate_to_number_range(value))
for value in self.TEXT_RGBA]
bg_color = [int(_validate_to_number_range(value))
for value in self.BG_RGBA]
v_align = self.v_align_cmb.get()
h_align = self.h_align_cmb.get()
label_thickness = int(_validate_to_number_range(self.label_thickness,
MIN_FONT, MAX_FONT))
label_position = self.label_position_cmb.get()
label_over_image = self.label_over_image.get()
## TODO: Allow the user to choose a different directory to load the
# font files from
return {"text": text,
"font_size": font_size,
"font_file": font_file,
"text_color": tuple(text_color),
"bg": tuple(bg_color),
"v_align": v_align,
"h_align": h_align,
"label_thickness": label_thickness,
"label_position": label_position,
"label_over_image": label_over_image
}
| mit |
ChugR/qpid-dispatch | tests/system_tests_http2.py | 3 | 31067 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import re
import sys
from subprocess import PIPE
from time import sleep

import system_test
from system_test import TestCase, Qdrouterd, QdManager, Process, SkipIfNeeded
def python_37_available():
    """Return True if the interpreter is Python 3.7 or newer.

    Always returns a bool; the previous version implicitly returned
    None for older interpreters, which worked only via truthiness.
    """
    return sys.version_info >= (3, 7)
def curl_available():
    """Return True if the curl command can be launched, False otherwise."""
    popen_args = ['curl', '--version']
    try:
        process = Process(popen_args,
                          name='curl_check',
                          stdout=PIPE,
                          expect=None,
                          universal_newlines=True)
        # Reap the process; only the ability to launch curl matters,
        # the version output itself is irrelevant.
        process.communicate()
        return True
    except Exception:
        # Narrowed from a bare except: a bare clause would also swallow
        # KeyboardInterrupt/SystemExit. Typically this is OSError when
        # curl is not installed.
        return False
def quart_available():
    """
    Return True if quart is installed and its version is at least 0.13,
    False otherwise.
    """
    popen_args = ['quart', '--version']
    try:
        process = Process(popen_args,
                          name='quart_check',
                          stdout=PIPE,
                          expect=None,
                          universal_newlines=True)
        out = process.communicate()[0]
        # The output looks like "Quart 0.13.1" -- TODO confirm exact
        # format. Extract the numeric major.minor pair instead of
        # inspecting single characters: the original code read only the
        # last character of the first dot-separated field and therefore
        # mis-parsed any major version >= 10.
        match = re.search(r'(\d+)\.(\d+)', out)
        if match is None:
            return False
        major, minor = int(match.group(1)), int(match.group(2))
        return major > 0 or minor >= 13
    except Exception as e:
        print(e)
        print("quart_not_available")
        return False
def skip_test():
    """Return True when any prerequisite of the http2 tests is missing.

    The tests need Python >= 3.7, Quart >= 0.13 and a working curl.
    """
    return not (python_37_available() and quart_available()
                and curl_available())
class Http2TestBase(TestCase):
    """Base class providing a curl runner for the HTTP2 tests."""

    def run_curl(self, args=None, regexp=None, timeout=system_test.TIMEOUT, address=None):
        """Run curl against `address` with HTTP2 prior knowledge.

        `args` is an optional list of extra curl arguments.
        `regexp` is unused; kept for interface compatibility.
        `timeout` is passed to curl's -m/--max-time: the maximum time,
        in seconds, curl may spend before exiting with a timeout error
        code (28).
        Returns curl's stdout; fails the test if curl exits non-zero.
        """
        local_args = ["--http2-prior-knowledge"]
        if args:
            local_args = args + ["--http2-prior-knowledge"]
        popen_args = ['curl',
                      str(address),
                      '--max-time', str(timeout)] + local_args
        p = self.popen(popen_args,
                       name='curl-' + self.id(), stdout=PIPE, expect=None,
                       universal_newlines=True)
        out = p.communicate()[0]
        # assertEqual (instead of the previous print + bare assert)
        # keeps curl's output in the failure message and still works
        # when Python runs with -O, which strips assert statements.
        self.assertEqual(0, p.returncode, out)
        return out
class CommonHttp2Tests:
    """
    Common base class containing all HTTP2 tests. These tests are run by
    all topologies of routers.

    Subclasses must provide ``self.router_qdra`` (the router the curl
    client talks to) and inherit ``run_curl`` from Http2TestBase.
    """
    @SkipIfNeeded(skip_test(), "Python 3.7 or greater, Quart 0.13.0 or greater and curl needed to run http2 tests")
    # Tests the HTTP2 head request
    def test_head_request(self):
        # Run curl 127.0.0.1:port --http2-prior-knowledge --head
        address = self.router_qdra.http_addresses[0]
        out = self.run_curl(args=["--head"], address=address)
        self.assertIn('HTTP/2 200', out)
        self.assertIn('server: hypercorn-h2', out)
        self.assertIn('content-type: text/html; charset=utf-8', out)

    @SkipIfNeeded(skip_test(), "Python 3.7 or greater, Quart 0.13.0 or greater and curl needed to run http2 tests")
    def test_get_request(self):
        # Run curl 127.0.0.1:port --http2-prior-knowledge
        address = self.router_qdra.http_addresses[0]
        out = self.run_curl(address=address)
        # The server body contains "0,1,2,...,999," -- rebuild the
        # expected payload and verify the whole response came through.
        ret_string = "".join("%s," % num for num in range(1000))
        self.assertIn(ret_string, out)

    # @SkipIfNeeded(skip_test(), "Python 3.7 or greater, Quart 0.13.0 or greater and curl needed to run http2 tests")
    # def test_large_get_request(self):
    # Tests a large get request. Response is more than 50k which means it
    # will span many qd_http2_buffer_t objects.
    # Run curl 127.0.0.1:port/largeget --http2-prior-knowledge
    #    address = self.router_qdra.http_addresses[0] + "/largeget"
    #    out = self.run_curl(address=address)
    #    self.assertIn("49996,49997,49998,49999", out)

    @SkipIfNeeded(skip_test(), "Python 3.7 or greater, Quart 0.13.0 or greater and curl needed to run http2 tests")
    def test_post_request(self):
        # curl -d "fname=John&lname=Doe" -X POST 127.0.0.1:9000/myinfo --http2-prior-knowledge
        address = self.router_qdra.http_addresses[0] + "/myinfo"
        out = self.run_curl(args=['-d', 'fname=John&lname=Doe', '-X', 'POST'], address=address)
        self.assertIn('Success! Your first name is John, last name is Doe', out)

    @SkipIfNeeded(skip_test(), "Python 3.7 or greater, Quart 0.13.0 or greater and curl needed to run http2 tests")
    def test_delete_request(self):
        # curl -X DELETE "http://127.0.0.1:9000/myinfo/delete/22122" -H "accept: application/json" --http2-prior-knowledge
        address = self.router_qdra.http_addresses[0] + "/myinfo/delete/22122"
        out = self.run_curl(args=['-X', 'DELETE'], address=address)
        self.assertIn('{"fname": "John", "lname": "Doe", "id": "22122"}', out)

    @SkipIfNeeded(skip_test(), "Python 3.7 or greater, Quart 0.13.0 or greater and curl needed to run http2 tests")
    def test_put_request(self):
        # curl -d "fname=John&lname=Doe" -X PUT 127.0.0.1:9000/myinfo --http2-prior-knowledge
        address = self.router_qdra.http_addresses[0] + "/myinfo"
        out = self.run_curl(args=['-d', 'fname=John&lname=Doe', '-X', 'PUT'], address=address)
        self.assertIn('Success! Your first name is John, last name is Doe', out)

    @SkipIfNeeded(skip_test(), "Python 3.7 or greater, Quart 0.13.0 or greater and curl needed to run http2 tests")
    def test_patch_request(self):
        # curl -d "fname=John&lname=Doe" -X PATCH 127.0.0.1:9000/myinfo --http2-prior-knowledge
        address = self.router_qdra.http_addresses[0] + "/patch"
        out = self.run_curl(args=['--data', '{\"op\":\"add\",\"path\":\"/user\",\"value\":\"jane\"}', '-X', 'PATCH'], address=address)
        self.assertIn('"op":"add"', out)

    @SkipIfNeeded(skip_test(), "Python 3.7 or greater, Quart 0.13.0 or greater and curl needed to run http2 tests")
    def test_404(self):
        # Run curl 127.0.0.1:port/unavilable --http2-prior-knowledge
        address = self.router_qdra.http_addresses[0] + "/unavilable"
        out = self.run_curl(address=address)
        self.assertIn('404 Not Found', out)

    @SkipIfNeeded(skip_test(), "Python 3.7 or greater, Quart 0.13.0 or greater and curl needed to run http2 tests")
    def test_500(self):
        # Run curl 127.0.0.1:port/test/500 --http2-prior-knowledge
        address = self.router_qdra.http_addresses[0] + "/test/500"
        out = self.run_curl(address=address)
        self.assertIn('500 Internal Server Error', out)

    @SkipIfNeeded(skip_test(), "Python 3.7 or greater, Quart 0.13.0 or greater and curl needed to run http2 tests")
    def test_get_image_png(self):
        # Run curl 127.0.0.1:port --http2-prior-knowledge
        # The PNG payload cannot be decoded as text; run_curl opens the
        # pipe in text mode, so a UnicodeDecodeError on the PNG magic
        # byte (0x89) proves binary data was actually relayed.
        passed = False
        try:
            address = self.router_qdra.http_addresses[0] + "/images/balanced-routing.png"
            self.run_curl(address=address)
        except UnicodeDecodeError as u:
            if "codec can't decode byte 0x89" in str(u):
                passed = True
        self.assertTrue(passed)

    @SkipIfNeeded(skip_test(), "Python 3.7 or greater, Quart 0.13.0 or greater and curl needed to run http2 tests")
    def test_get_image_jpg(self):
        # Run curl 127.0.0.1:port --http2-prior-knowledge
        # Same idea as the PNG test: 0xff is the JPEG marker byte.
        passed = False
        try:
            address = self.router_qdra.http_addresses[0] + "/images/apache.jpg"
            self.run_curl(address=address)
        except UnicodeDecodeError as u:
            print(u)
            if "codec can't decode byte 0xff" in str(u):
                passed = True
        self.assertTrue(passed)

    def check_connector_delete(self, client_addr, server_addr):
        """Delete and re-create the egress httpConnector, verifying traffic.

        `client_addr` is the http address the curl client talks to.
        `server_addr` is the management address of the router that owns
        the httpConnector.
        """
        # First make sure an http request goes through successfully.
        self.run_curl(address=client_addr)

        # Query connections on the egress router and verify the
        # connection to the HTTP2 server is present.
        qd_manager = QdManager(self, address=server_addr)
        connections = qd_manager.query('org.apache.qpid.dispatch.connection')
        self.assertGreaterEqual(len(connections), 2)
        server_conn_found = False
        for conn in connections:
            if os.environ['SERVER_LISTEN_PORT'] in conn['name']:
                server_conn_found = True
                break
        self.assertTrue(server_conn_found)

        # There must be exactly one httpConnector before the delete.
        http_connectors = qd_manager.query('org.apache.qpid.dispatch.httpConnector')
        self.assertEqual(len(http_connectors), 1)

        # Run a qdmanage DELETE on the httpConnector.
        qd_manager.delete("org.apache.qpid.dispatch.httpConnector", name=self.connector_name)

        # Make sure the connector is gone.
        http_connectors = qd_manager.query('org.apache.qpid.dispatch.httpConnector')
        self.assertEqual(len(http_connectors), 0)

        # Deleting the connector must have taken out the connection to
        # the server.
        connections = qd_manager.query('org.apache.qpid.dispatch.connection')
        http_server_conn_found = False
        for conn in connections:
            if os.environ['SERVER_LISTEN_PORT'] in conn['name']:
                # Bug fix: the original assigned server_conn_found here,
                # so the assertFalse below could never fail.
                http_server_conn_found = True
                break
        self.assertFalse(http_server_conn_found)

        sleep(2)

        # With no connector present, a curl GET must time out.
        request_timed_out = False
        try:
            out = self.run_curl(address=client_addr, timeout=5)
            print(out)
        except Exception:
            request_timed_out = True
        self.assertTrue(request_timed_out)

        # Add back the httpConnector.
        # qdmanage CREATE type=httpConnector address=examples.com host=127.0.0.1 port=80 protocolVersion=HTTP2
        qd_manager.create("org.apache.qpid.dispatch.httpConnector", self.connector_props)

        # Poll until the router re-establishes the server connection.
        num_tries = 2
        tries = 0
        conn_present = False
        while tries < num_tries:
            connections = qd_manager.query('org.apache.qpid.dispatch.connection')
            tries += 1
            if len(connections) < 2:
                sleep(2)
            else:
                conn_present = True
                break
        self.assertTrue(conn_present)

        # Traffic must flow again end to end.
        out = self.run_curl(address=client_addr)
        expected = "".join("%s," % num for num in range(1000))
        self.assertIn(expected, out)
class Http2TestOneStandaloneRouter(Http2TestBase, CommonHttp2Tests):
    """Runs the common HTTP2 tests through one standalone router that has
    both the httpListener (curl side) and the httpConnector (server side).
    """
    @classmethod
    def setUpClass(cls):
        """Start the quart HTTP2 test server and one standalone router."""
        super(Http2TestOneStandaloneRouter, cls).setUpClass()
        if skip_test():
            # Prerequisites missing; every test is skipped, so avoid
            # launching the server and router at all.
            return
        cls.http2_server_name = "http2_server"
        os.environ["QUART_APP"] = "http2server:app"
        # The port must be exported before both the server process and
        # the httpConnector config below consume it.
        os.environ['SERVER_LISTEN_PORT'] = str(cls.tester.get_port())
        cls.http2_server = cls.tester.http2server(name=cls.http2_server_name,
                                                  listen_port=int(os.getenv('SERVER_LISTEN_PORT')),
                                                  py_string='python3',
                                                  server_file="http2_server.py")
        name = "http2-test-standalone-router"
        cls.connector_name = 'connectorToBeDeleted'
        cls.connector_props = {
            'port': os.getenv('SERVER_LISTEN_PORT'),
            'address': 'examples',
            'host': '127.0.0.1',
            'protocolVersion': 'HTTP2',
            'name': cls.connector_name
        }
        config = Qdrouterd.Config([
            ('router', {'mode': 'standalone', 'id': 'QDR'}),
            ('listener', {'port': cls.tester.get_port(), 'role': 'normal', 'host': '0.0.0.0'}),
            ('httpListener', {'port': cls.tester.get_port(), 'address': 'examples',
                              'host': '127.0.0.1', 'protocolVersion': 'HTTP2'}),
            ('httpConnector', cls.connector_props)
        ])
        cls.router_qdra = cls.tester.qdrouterd(name, config, wait=True)

    @SkipIfNeeded(skip_test(), "Python 3.7 or greater, Quart 0.13.0 or greater and curl needed to run http2 tests")
    def test_zzz_http_connector_delete(self):
        # Named zzz so it runs last: it deletes and re-creates the
        # httpConnector, which would disturb the other tests.
        self.check_connector_delete(client_addr=self.router_qdra.http_addresses[0],
                                    server_addr=self.router_qdra.addresses[0])

    @SkipIfNeeded(skip_test(), "Python 3.7 or greater, Quart 0.13.0 or greater and curl needed to run http2 tests")
    def test_000_stats(self):
        """Verify httpRequestInfo counters after one GET and one POST."""
        # Run curl 127.0.0.1:port --http2-prior-knowledge
        address = self.router_qdra.http_addresses[0]
        qd_manager = QdManager(self, address=self.router_qdra.addresses[0])
        # First request
        out = self.run_curl(address=address)
        # Second request
        address = self.router_qdra.http_addresses[0] + "/myinfo"
        out = self.run_curl(args=['-d', 'fname=Mickey&lname=Mouse', '-X', 'POST'], address=address)
        self.assertIn('Success! Your first name is Mickey, last name is Mouse', out)
        stats = qd_manager.query('org.apache.qpid.dispatch.httpRequestInfo')
        self.assertEqual(len(stats), 2)
        # Give time for the core thread to augment the stats.
        i = 0
        while i < 3:
            if not stats or stats[0].get('requests') < 2:
                i += 1
                sleep(1)
                stats = qd_manager.query('org.apache.qpid.dispatch.httpRequestInfo')
            else:
                break
        for s in stats:
            self.assertEqual(s.get('requests'), 2)
            self.assertEqual(s.get('details').get('GET:200'), 1)
            self.assertEqual(s.get('details').get('POST:200'), 1)
        # One record is the ingress ('in') side, the other the egress
        # ('out') side; their byte counts mirror each other.
        # NOTE(review): 24/3944 are presumably the exact request/response
        # body sizes produced by the test server -- confirm if the
        # server content ever changes.
        if stats[0].get('direction') == 'out':
            self.assertEqual(stats[1].get('direction'), 'in')
            self.assertEqual(stats[0].get('bytesOut'), 24)
            self.assertEqual(stats[0].get('bytesIn'), 3944)
            self.assertEqual(stats[1].get('bytesOut'), 3944)
            self.assertEqual(stats[1].get('bytesIn'), 24)
        else:
            self.assertEqual(stats[0].get('direction'), 'in')
            self.assertEqual(stats[1].get('direction'), 'out')
            self.assertEqual(stats[0].get('bytesOut'), 3944)
            self.assertEqual(stats[0].get('bytesIn'), 24)
            self.assertEqual(stats[1].get('bytesOut'), 24)
            self.assertEqual(stats[1].get('bytesIn'), 3944)
class Http2TestOneEdgeRouter(Http2TestBase, CommonHttp2Tests):
    """Runs the common HTTP2 tests through a single edge-mode router."""
    @classmethod
    def setUpClass(cls):
        """Start the quart HTTP2 test server and one edge router."""
        super(Http2TestOneEdgeRouter, cls).setUpClass()
        if skip_test():
            # Prerequisites missing; every test is skipped.
            return
        cls.http2_server_name = "http2_server"
        os.environ["QUART_APP"] = "http2server:app"
        # Export the port before the server and the connector use it.
        os.environ['SERVER_LISTEN_PORT'] = str(cls.tester.get_port())
        cls.http2_server = cls.tester.http2server(name=cls.http2_server_name,
                                                  listen_port=int(os.getenv('SERVER_LISTEN_PORT')),
                                                  py_string='python3',
                                                  server_file="http2_server.py")
        name = "http2-test-router"
        cls.connector_name = 'connectorToBeDeleted'
        cls.connector_props = {
            'port': os.getenv('SERVER_LISTEN_PORT'),
            'address': 'examples',
            'host': '127.0.0.1',
            'protocolVersion': 'HTTP2',
            'name': cls.connector_name
        }
        config = Qdrouterd.Config([
            ('router', {'mode': 'edge', 'id': 'QDR'}),
            ('listener', {'port': cls.tester.get_port(), 'role': 'normal', 'host': '0.0.0.0'}),
            ('httpListener', {'port': cls.tester.get_port(), 'address': 'examples',
                              'host': '127.0.0.1', 'protocolVersion': 'HTTP2'}),
            ('httpConnector', cls.connector_props)
        ])
        cls.router_qdra = cls.tester.qdrouterd(name, config, wait=True)

    @SkipIfNeeded(skip_test(), "Python 3.7 or greater, Quart 0.13.0 or greater and curl needed to run http2 tests")
    def test_zzz_http_connector_delete(self):
        # Runs last (zzz): deletes and re-creates the httpConnector.
        self.check_connector_delete(client_addr=self.router_qdra.http_addresses[0],
                                    server_addr=self.router_qdra.addresses[0])
class Http2TestOneInteriorRouter(Http2TestBase, CommonHttp2Tests):
    """Runs the common HTTP2 tests through a single interior-mode router."""
    @classmethod
    def setUpClass(cls):
        """Start the quart HTTP2 test server and one interior router."""
        super(Http2TestOneInteriorRouter, cls).setUpClass()
        if skip_test():
            # Prerequisites missing; every test is skipped.
            return
        cls.http2_server_name = "http2_server"
        os.environ["QUART_APP"] = "http2server:app"
        # Export the port before the server and the connector use it.
        os.environ['SERVER_LISTEN_PORT'] = str(cls.tester.get_port())
        cls.http2_server = cls.tester.http2server(name=cls.http2_server_name,
                                                  listen_port=int(os.getenv('SERVER_LISTEN_PORT')),
                                                  py_string='python3',
                                                  server_file="http2_server.py")
        name = "http2-test-router"
        cls.connector_name = 'connectorToBeDeleted'
        cls.connector_props = {
            'port': os.getenv('SERVER_LISTEN_PORT'),
            'address': 'examples',
            'host': '127.0.0.1',
            'protocolVersion': 'HTTP2',
            'name': cls.connector_name
        }
        config = Qdrouterd.Config([
            ('router', {'mode': 'interior', 'id': 'QDR'}),
            ('listener', {'port': cls.tester.get_port(), 'role': 'normal', 'host': '0.0.0.0'}),
            ('httpListener', {'port': cls.tester.get_port(), 'address': 'examples',
                              'host': '127.0.0.1', 'protocolVersion': 'HTTP2'}),
            ('httpConnector', cls.connector_props)
        ])
        cls.router_qdra = cls.tester.qdrouterd(name, config, wait=True)

    @SkipIfNeeded(skip_test(), "Python 3.7 or greater, Quart 0.13.0 or greater and curl needed to run http2 tests")
    def test_zzz_http_connector_delete(self):
        # Runs last (zzz): deletes and re-creates the httpConnector.
        self.check_connector_delete(client_addr=self.router_qdra.http_addresses[0],
                                    server_addr=self.router_qdra.addresses[0])
class Http2TestTwoRouter(Http2TestBase, CommonHttp2Tests):
    """Two interior routers: QDR.A holds the httpListener (curl side) and
    QDR.B holds the httpConnector (HTTP2 server side).
    """
    @classmethod
    def setUpClass(cls):
        """Start the HTTP2 server and a two-router interior network."""
        super(Http2TestTwoRouter, cls).setUpClass()
        if skip_test():
            return
        cls.http2_server_name = "http2_server"
        os.environ["QUART_APP"] = "http2server:app"
        # Export the port before the server process and the
        # httpConnector config consume it.
        os.environ['SERVER_LISTEN_PORT'] = str(cls.tester.get_port())
        cls.http2_server = cls.tester.http2server(name=cls.http2_server_name,
                                                  listen_port=int(os.getenv('SERVER_LISTEN_PORT')),
                                                  py_string='python3',
                                                  server_file="http2_server.py")
        # NOTE(review): both routers are created under the same name;
        # confirm the test harness keeps their artifacts apart.
        name = "http2-test-router"
        inter_router_port = cls.tester.get_port()
        config_qdra = Qdrouterd.Config([
            ('router', {'mode': 'interior', 'id': 'QDR.A'}),
            ('listener', {'port': cls.tester.get_port(), 'role': 'normal', 'host': '0.0.0.0'}),
            ('httpListener', {'port': cls.tester.get_port(), 'address': 'examples',
                              'host': '127.0.0.1', 'protocolVersion': 'HTTP2'}),
            ('listener', {'role': 'inter-router', 'port': inter_router_port})
        ])
        cls.connector_name = 'connectorToBeDeleted'
        cls.connector_props = {
            'port': os.getenv('SERVER_LISTEN_PORT'),
            'address': 'examples',
            'host': '127.0.0.1',
            'protocolVersion': 'HTTP2',
            'name': cls.connector_name
        }
        config_qdrb = Qdrouterd.Config([
            ('router', {'mode': 'interior', 'id': 'QDR.B'}),
            ('listener', {'port': cls.tester.get_port(), 'role': 'normal', 'host': '0.0.0.0'}),
            ('httpConnector', cls.connector_props),
            ('connector', {'name': 'connectorToA', 'role': 'inter-router',
                           'port': inter_router_port,
                           'verifyHostname': 'no'})
        ])
        cls.router_qdra = cls.tester.qdrouterd(name, config_qdra, wait=True)
        cls.router_qdrb = cls.tester.qdrouterd(name, config_qdrb, wait=True)
        cls.router_qdra.wait_router_connected('QDR.B')
        cls.router_qdrb.wait_router_connected('QDR.A')
        sleep(2)

    @SkipIfNeeded(skip_test(), "Python 3.7 or greater, Quart 0.13.0 or greater and curl needed to run http2 tests")
    def test_000_stats(self):
        """Verify per-router httpRequestInfo after one GET and one POST."""
        # Run curl 127.0.0.1:port --http2-prior-knowledge
        address = self.router_qdra.http_addresses[0]
        qd_manager_a = QdManager(self, address=self.router_qdra.addresses[0])
        stats_a = qd_manager_a.query('org.apache.qpid.dispatch.httpRequestInfo')
        # First request
        self.run_curl(address=address)
        address = self.router_qdra.http_addresses[0] + "/myinfo"
        # Second request
        out = self.run_curl(args=['-d', 'fname=Mickey&lname=Mouse', '-X', 'POST'], address=address)
        self.assertIn('Success! Your first name is Mickey, last name is Mouse', out)
        # Give time for the core thread to augment the stats.
        i = 0
        while i < 3:
            if not stats_a or stats_a[0].get('requests') < 2:
                sleep(1)
                i += 1
                stats_a = qd_manager_a.query('org.apache.qpid.dispatch.httpRequestInfo')
            else:
                break
        # Router A only sees the ingress (listener) side.
        self.assertEqual(len(stats_a), 1)
        self.assertEqual(stats_a[0].get('requests'), 2)
        self.assertEqual(stats_a[0].get('direction'), 'in')
        self.assertEqual(stats_a[0].get('bytesOut'), 3944)
        self.assertEqual(stats_a[0].get('bytesIn'), 24)
        qd_manager_b = QdManager(self, address=self.router_qdrb.addresses[0])
        stats_b = qd_manager_b.query('org.apache.qpid.dispatch.httpRequestInfo')
        self.assertEqual(len(stats_b), 1)
        i = 0
        while i < 3:
            # (Removed the original unused "s = stats_b[0]" assignment.)
            if not stats_b or stats_b[0].get('requests') < 2:
                i += 1
                sleep(1)
                stats_b = qd_manager_b.query('org.apache.qpid.dispatch.httpRequestInfo')
            else:
                break
        # Router B only sees the egress (connector) side.
        self.assertEqual(stats_b[0].get('requests'), 2)
        self.assertEqual(stats_b[0].get('direction'), 'out')
        self.assertEqual(stats_b[0].get('bytesOut'), 24)
        self.assertEqual(stats_b[0].get('bytesIn'), 3944)

    @SkipIfNeeded(skip_test(), "Python 3.7 or greater, Quart 0.13.0 or greater and curl needed to run http2 tests")
    def test_zzz_http_connector_delete(self):
        # Runs last (zzz): deletes and re-creates the httpConnector on
        # the egress router (QDR.B).
        self.check_connector_delete(client_addr=self.router_qdra.http_addresses[0],
                                    server_addr=self.router_qdrb.addresses[0])
class Http2TestEdgeInteriorRouter(Http2TestBase, CommonHttp2Tests):
    """
    The interior router connects to the HTTP2 server and the curl client
    connects to the edge router.
    """
    @classmethod
    def setUpClass(cls):
        """Start the HTTP2 server, an interior router and an edge router."""
        super(Http2TestEdgeInteriorRouter, cls).setUpClass()
        if skip_test():
            # Prerequisites missing; every test is skipped.
            return
        cls.http2_server_name = "http2_server"
        os.environ["QUART_APP"] = "http2server:app"
        # Export the port before the server and the connector use it.
        os.environ['SERVER_LISTEN_PORT'] = str(cls.tester.get_port())
        cls.http2_server = cls.tester.http2server(name=cls.http2_server_name,
                                                  listen_port=int(os.getenv('SERVER_LISTEN_PORT')),
                                                  py_string='python3',
                                                  server_file="http2_server.py")
        inter_router_port = cls.tester.get_port()
        config_edgea = Qdrouterd.Config([
            ('router', {'mode': 'edge', 'id': 'EDGE.A'}),
            ('listener', {'port': cls.tester.get_port(), 'role': 'normal', 'host': '0.0.0.0'}),
            ('httpListener', {'port': cls.tester.get_port(), 'address': 'examples',
                              'host': '127.0.0.1', 'protocolVersion': 'HTTP2'}),
            ('connector', {'name': 'connectorToA', 'role': 'edge',
                           'port': inter_router_port,
                           'verifyHostname': 'no'})
        ])
        config_qdrb = Qdrouterd.Config([
            ('router', {'mode': 'interior', 'id': 'QDR.A'}),
            ('listener', {'port': cls.tester.get_port(), 'role': 'normal', 'host': '0.0.0.0'}),
            ('listener', {'role': 'edge', 'port': inter_router_port}),
            ('httpConnector',
             {'port': os.getenv('SERVER_LISTEN_PORT'), 'address': 'examples',
              'host': '127.0.0.1', 'protocolVersion': 'HTTP2'})
        ])
        # The interior router must be up before the edge router attaches.
        cls.router_qdrb = cls.tester.qdrouterd("interior-router", config_qdrb, wait=True)
        cls.router_qdra = cls.tester.qdrouterd("edge-router", config_edgea)
        # Give the edge router time to attach to the interior router.
        sleep(3)
class Http2TestInteriorEdgeRouter(Http2TestBase, CommonHttp2Tests):
    """
    The edge router connects to the HTTP2 server and the curl client
    connects to the interior router.
    """
    @classmethod
    def setUpClass(cls):
        """Start the HTTP2 server, an interior router and an edge router."""
        super(Http2TestInteriorEdgeRouter, cls).setUpClass()
        if skip_test():
            # Prerequisites missing; every test is skipped.
            return
        cls.http2_server_name = "http2_server"
        os.environ["QUART_APP"] = "http2server:app"
        # Export the port before the server and the connector use it.
        os.environ['SERVER_LISTEN_PORT'] = str(cls.tester.get_port())
        cls.http2_server = cls.tester.http2server(name=cls.http2_server_name,
                                                  listen_port=int(os.getenv('SERVER_LISTEN_PORT')),
                                                  py_string='python3',
                                                  server_file="http2_server.py")
        inter_router_port = cls.tester.get_port()
        config_edge = Qdrouterd.Config([
            ('router', {'mode': 'edge', 'id': 'EDGE.A'}),
            ('listener', {'port': cls.tester.get_port(), 'role': 'normal', 'host': '0.0.0.0'}),
            ('httpConnector',
             {'port': os.getenv('SERVER_LISTEN_PORT'), 'address': 'examples',
              'host': '127.0.0.1', 'protocolVersion': 'HTTP2'}),
            ('connector', {'name': 'connectorToA', 'role': 'edge',
                           'port': inter_router_port,
                           'verifyHostname': 'no'})
        ])
        config_qdra = Qdrouterd.Config([
            ('router', {'mode': 'interior', 'id': 'QDR.A'}),
            ('listener', {'port': cls.tester.get_port(), 'role': 'normal', 'host': '0.0.0.0'}),
            ('listener', {'role': 'edge', 'port': inter_router_port}),
            ('httpListener',
             {'port': cls.tester.get_port(), 'address': 'examples',
              'host': '127.0.0.1', 'protocolVersion': 'HTTP2'}),
        ])
        # The interior router must be up before the edge router attaches.
        cls.router_qdra = cls.tester.qdrouterd("interior-router", config_qdra, wait=True)
        cls.router_qdrb = cls.tester.qdrouterd("edge-router", config_edge)
        # Give the edge router time to attach to the interior router.
        sleep(3)
class Http2TestEdgeToEdgeViaInteriorRouter(Http2TestBase, CommonHttp2Tests):
    """
    The edge router connects to the HTTP2 server and the curl client
    connects to another edge router. The two edge routers are connected
    via an interior router.
    """
    @classmethod
    def setUpClass(cls):
        """Start the HTTP2 server, one interior and two edge routers."""
        super(Http2TestEdgeToEdgeViaInteriorRouter, cls).setUpClass()
        if skip_test():
            # Prerequisites missing; every test is skipped.
            return
        cls.http2_server_name = "http2_server"
        os.environ["QUART_APP"] = "http2server:app"
        # Export the port before the server and the connector use it.
        os.environ['SERVER_LISTEN_PORT'] = str(cls.tester.get_port())
        cls.http2_server = cls.tester.http2server(name=cls.http2_server_name,
                                                  listen_port=int(os.getenv('SERVER_LISTEN_PORT')),
                                                  py_string='python3',
                                                  server_file="http2_server.py")
        cls.connector_name = 'connectorToBeDeleted'
        cls.connector_props = {
            'port': os.getenv('SERVER_LISTEN_PORT'),
            'address': 'examples',
            'host': '127.0.0.1',
            'protocolVersion': 'HTTP2',
            'name': cls.connector_name
        }
        inter_router_port = cls.tester.get_port()
        # NOTE(review): config_edge_b carries id 'EDGE.A' and
        # config_edge_a carries 'EDGE.B' -- the ids look swapped relative
        # to the variable names; harmless as long as they stay unique,
        # but confirm the intent.
        config_edge_b = Qdrouterd.Config([
            ('router', {'mode': 'edge', 'id': 'EDGE.A'}),
            ('listener', {'port': cls.tester.get_port(), 'role': 'normal', 'host': '0.0.0.0'}),
            ('httpConnector', cls.connector_props),
            ('connector', {'name': 'connectorToA', 'role': 'edge',
                           'port': inter_router_port,
                           'verifyHostname': 'no'})
        ])
        config_qdra = Qdrouterd.Config([
            ('router', {'mode': 'interior', 'id': 'QDR.A'}),
            ('listener', {'port': cls.tester.get_port(), 'role': 'normal', 'host': '0.0.0.0'}),
            ('listener', {'role': 'edge', 'port': inter_router_port}),
        ])
        config_edge_a = Qdrouterd.Config([
            ('router', {'mode': 'edge', 'id': 'EDGE.B'}),
            ('listener', {'port': cls.tester.get_port(), 'role': 'normal',
                          'host': '0.0.0.0'}),
            ('httpListener',
             {'port': cls.tester.get_port(), 'address': 'examples',
              'host': '127.0.0.1', 'protocolVersion': 'HTTP2'}),
            ('connector', {'name': 'connectorToA', 'role': 'edge',
                           'port': inter_router_port,
                           'verifyHostname': 'no'})
        ])
        # The interior router must be up before the edges attach.
        cls.interior_qdr = cls.tester.qdrouterd("interior-router", config_qdra,
                                                wait=True)
        cls.router_qdra = cls.tester.qdrouterd("edge-router-a", config_edge_a)
        cls.router_qdrb = cls.tester.qdrouterd("edge-router-b", config_edge_b)
        # Give both edge routers time to attach to the interior router.
        sleep(5)

    @SkipIfNeeded(skip_test(), "Python 3.7 or greater, Quart 0.13.0 or greater and curl needed to run http2 tests")
    def test_zzz_http_connector_delete(self):
        # Runs last (zzz): deletes and re-creates the httpConnector on
        # the server-side edge router.
        self.check_connector_delete(client_addr=self.router_qdra.http_addresses[0],
                                    server_addr=self.router_qdrb.addresses[0])
| apache-2.0 |
mitsuhiko/django | tests/modeltests/files/tests.py | 3 | 4515 | from __future__ import with_statement
import shutil
import sys
import tempfile
from django.core.cache import cache
from django.core.files import File
from django.core.files.base import ContentFile
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import TestCase
from models import Storage, temp_storage, temp_storage_location
class FileTests(TestCase):
    """Exercise FileField storage behavior: saving, name collisions,
    pickling via the cache, default files and the File context manager.

    NOTE(review): plain string literals are used where file content is
    expected (Python 2 era, where str == bytes) -- confirm before
    running under Python 3.
    """
    def tearDown(self):
        # Wipe the temporary storage directory after every test so
        # tests cannot see each other's files.
        shutil.rmtree(temp_storage_location)

    def test_files(self):
        """Walk through the FileField lifecycle end to end."""
        temp_storage.save('tests/default.txt', ContentFile('default content'))
        # Attempting to access a FileField from the class raises a descriptive
        # error
        self.assertRaises(AttributeError, lambda: Storage.normal)

        # An object without a file has limited functionality.
        obj1 = Storage()
        self.assertEqual(obj1.normal.name, "")
        self.assertRaises(ValueError, lambda: obj1.normal.size)

        # Saving a file enables full functionality.
        obj1.normal.save("django_test.txt", ContentFile("content"))
        self.assertEqual(obj1.normal.name, "tests/django_test.txt")
        self.assertEqual(obj1.normal.size, 7)
        self.assertEqual(obj1.normal.read(), "content")
        obj1.normal.close()

        # File objects can be assigned to FileField attributes, but shouldn't
        # get committed until the model it's attached to is saved.
        obj1.normal = SimpleUploadedFile("assignment.txt", "content")
        dirs, files = temp_storage.listdir("tests")
        self.assertEqual(dirs, [])
        self.assertEqual(sorted(files), ["default.txt", "django_test.txt"])
        obj1.save()
        dirs, files = temp_storage.listdir("tests")
        self.assertEqual(
            sorted(files), ["assignment.txt", "default.txt", "django_test.txt"]
        )

        # Files can be read in a little at a time, if necessary.
        obj1.normal.open()
        self.assertEqual(obj1.normal.read(3), "con")
        self.assertEqual(obj1.normal.read(), "tent")
        self.assertEqual(list(obj1.normal.chunks(chunk_size=2)), ["co", "nt", "en", "t"])
        obj1.normal.close()

        # Save another file with the same name.
        obj2 = Storage()
        obj2.normal.save("django_test.txt", ContentFile("more content"))
        self.assertEqual(obj2.normal.name, "tests/django_test_1.txt")
        self.assertEqual(obj2.normal.size, 12)

        # Push the objects into the cache to make sure they pickle properly
        cache.set("obj1", obj1)
        cache.set("obj2", obj2)
        self.assertEqual(cache.get("obj2").normal.name, "tests/django_test_1.txt")

        # Deleting an object does not delete the file it uses.
        obj2.delete()
        obj2.normal.save("django_test.txt", ContentFile("more content"))
        self.assertEqual(obj2.normal.name, "tests/django_test_2.txt")

        # Multiple files with the same name get _N appended to them.
        objs = [Storage() for i in range(3)]
        for o in objs:
            o.normal.save("multiple_files.txt", ContentFile("Same Content"))
        self.assertEqual(
            [o.normal.name for o in objs],
            ["tests/multiple_files.txt", "tests/multiple_files_1.txt", "tests/multiple_files_2.txt"]
        )
        for o in objs:
            o.delete()

        # Default values allow an object to access a single file.
        obj3 = Storage.objects.create()
        self.assertEqual(obj3.default.name, "tests/default.txt")
        self.assertEqual(obj3.default.read(), "default content")
        obj3.default.close()

        # But it shouldn't be deleted, even if there are no more objects using
        # it.
        obj3.delete()
        obj3 = Storage()
        self.assertEqual(obj3.default.read(), "default content")
        obj3.default.close()

        # Verify the fix for #5655, making sure the directory is only
        # determined once.
        obj4 = Storage()
        obj4.random.save("random_file", ContentFile("random content"))
        self.assertTrue(obj4.random.name.endswith("/random_file"))

        # Clean up the temporary files and dir.
        obj1.normal.delete()
        obj2.normal.delete()
        obj3.default.delete()
        obj4.random.delete()

    def test_context_manager(self):
        """File supports "with"; exiting the block must close both the
        wrapper and the underlying raw file object."""
        orig_file = tempfile.TemporaryFile()
        base_file = File(orig_file)
        with base_file as f:
            self.assertIs(base_file, f)
            self.assertFalse(f.closed)
        self.assertTrue(f.closed)
        self.assertTrue(orig_file.closed)
| bsd-3-clause |
MSK61/processorsim | src/sim_services/__init__.py | 1 | 22669 | # -*- coding: utf-8 -*-
"""sim_services package"""
############################################################
#
# Copyright 2017, 2019, 2020, 2021 Mohammed El-Afifi
# This file is part of processorSim.
#
# processorSim is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# processorSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with processorSim. If not, see
# <http://www.gnu.org/licenses/>.
#
# program: processor simulator
#
# file: __init__.py
#
# function: sim_services package
#
# description: sim_services package export file
#
# author: Mohammed El-Afifi (ME)
#
# environment: Visual Studdio Code 1.54.1, python 3.8.7, Fedora release
# 33 (Thirty Three)
#
# notes: This is a private program.
#
############################################################
import collections
import copy
from itertools import chain
import string
import typing
from typing import Dict, Iterable, Iterator, List, Mapping, MutableMapping, \
MutableSequence, Sequence, Tuple
import attr
from fastcore.foundation import Self
import more_itertools
from container_utils import BagValDict
from processor_utils import ProcessorDesc
import processor_utils.units
from processor_utils.units import LockInfo, UnitModel
from program_defs import HwInstruction
from reg_access import AccessType, RegAccessQueue, RegAccQBuilder
from str_utils import ICaseString
from . import _instr_sinks, _utils
from ._instr_sinks import IInstrSink
from .sim_defs import InstrState, StallState
_T = typing.TypeVar("_T")
class StallError(RuntimeError):
    """Error raised when the simulated processor can't make progress."""

    STATE_KEY = "state"  # placeholder name expected in message templates

    def __init__(self, msg_tmpl: str, stalled_state: object) -> None:
        """Create a stalled processor error.

        `self` is this stalled processor error.
        `msg_tmpl` is a string.Template-style message whose $state
                   placeholder receives the stalled processor state.
        `stalled_state` is the stalled processor state.
        """
        rendered = string.Template(msg_tmpl).substitute(
            {StallError.STATE_KEY: stalled_state})
        super().__init__(rendered)
        self._stalled_state = stalled_state

    @property
    def processor_state(self) -> object:
        """Processor state at the moment the stall was detected

        `self` is this stalled processor error.
        """
        return self._stalled_state
@attr.s(frozen=True)
class HwSpec:
    """Hardware specification"""

    # Full processor description (input, output, in-out and internal
    # units) this specification is derived from.
    processor_desc: ProcessorDesc = attr.ib()

    # Name-to-model lookup table, computed from processor_desc below.
    name_unit_map: Dict[ICaseString, UnitModel] = attr.ib(init=False)

    @name_unit_map.default
    def _(self) -> Dict[ICaseString, UnitModel]:
        """Build the name-to-unit mapping.

        `self` is this hardware specification.
        Input and in-out ports are unit models themselves; output ports
        and internal units wrap their model, hence the extra `.model`
        extraction for those two groups.
        """
        models = chain(
            self.processor_desc.in_ports, self.processor_desc.in_out_ports,
            map(Self.model(), chain(self.processor_desc.out_ports,
                self.processor_desc.internal_units)))
        return {unit.name: unit for unit in models}
def simulate(program: Sequence[HwInstruction], hw_info: HwSpec) -> List[
        BagValDict[ICaseString, InstrState]]:
    """Run the given program on the processor.

    `program` is the program to run.
    `hw_info` is the processor information.
    The function returns the pipeline diagram: one utilization snapshot
    per simulated clock cycle.
    """
    reg_plan = _build_acc_plan(enumerate(program))
    issue_rec = _IssueInfo()
    pipeline_diag: List[BagValDict[ICaseString, InstrState]] = []
    # Pulse the clock until the whole program has been fetched and all
    # in-flight instructions have drained out of the pipeline.
    while issue_rec.entered < len(program) or issue_rec.in_flight:
        _run_cycle(program, reg_plan, hw_info, pipeline_diag, issue_rec)
    return pipeline_diag
@attr.s
class _AcceptStatus:
    """Instruction acceptance status"""

    # True while the last issue attempt succeeded; cleared when no input
    # unit could take the instruction.
    accepted: bool = attr.ib(True, init=False)

    # True when the memory bus is occupied during the current cycle.
    mem_used: bool = attr.ib()
@attr.s
class _IssueInfo:
    """Instruction issue information record"""

    def bump_input(self) -> None:
        """Increment the entered instructions index.

        `self` is this issue information record.
        """
        self._entered += 1

    def pump_outputs(self, outputs: int) -> None:
        """Pump outputs out of the pipeline.

        `self` is this issue information record.
        `outputs` are the number of outputs to pump out of the pipeline.
        """
        self._exited += outputs

    @property
    def entered(self) -> int:
        """Instruction index (number of instructions fetched so far)

        `self` is this issue information record.
        """
        return self._entered

    @property
    def in_flight(self) -> bool:
        """True if there're in-flight instructions, otherwise False

        `self` is this issue information record.
        """
        return self._exited < self._entered

    # Counters driven by the methods above; excluded from __init__ so
    # every record starts at zero.
    _entered = attr.ib(0, init=False)
    _exited = attr.ib(0, init=False)
@attr.s(auto_attribs=True, frozen=True)
class _RegAvailState:
    """Registers availability state"""

    # True when every required register access was granted.
    avail: bool

    # Registers whose granted accesses should later be cleared from the
    # access queues (empty when avail is False).
    regs: Iterable[object]


@attr.s(auto_attribs=True, frozen=True)
class _TransitionUtil:
    """Utilization transition of a single unit between two pulses"""

    # Unit utilization at the previous clock pulse.
    old_util: typing.Collection[InstrState]

    # Unit utilization at the current clock pulse.
    new_util: Iterable[InstrState]
def _accept_instr(
        issue_rec: _IssueInfo, instr_categ: object,
        input_iter: Iterator[UnitModel], util_info: BagValDict[
            ICaseString, InstrState], accept_res: _AcceptStatus) -> None:
    """Try to accept the next instruction into an input unit.

    `issue_rec` is the issue record.
    `instr_categ` is the next instruction category.
    `input_iter` is an iterator over the candidate input processing
                 units for issuing the instruction.
    `util_info` is the unit utilization information.
    `accept_res` is the instruction acceptance result.
    The function probes candidate units until one accepts the
    instruction or the candidates run out, updating the utilization
    information accordingly.
    """
    accept_res.accepted = False
    # _accept_in_unit returns True once the instruction was issued or
    # there are no more units to try.
    while not _accept_in_unit(
            input_iter, instr_categ, accept_res, util_info, issue_rec):
        pass
def _accept_in_unit(
        input_iter: Iterator[UnitModel], instr_categ: object,
        accept_res: _AcceptStatus, util_info:
        BagValDict[ICaseString, InstrState], issue_rec: _IssueInfo) -> bool:
    """Try to accept the next instruction into the next candidate unit.

    `input_iter` is an iterator over the candidate input processing
                 units for issuing the instruction.
    `instr_categ` is the next instruction category.
    `accept_res` is the instruction acceptance result.
    `util_info` is the unit utilization information.
    `issue_rec` is the issue record.
    The function returns whether probing should stop (either the
    instruction was issued or the candidates are exhausted).
    """
    unit = next(input_iter, None)
    if unit is None:  # no more candidate units
        return True
    needs_mem = unit.needs_mem(instr_categ)
    if _utils.mem_unavail(accept_res.mem_used, needs_mem):
        return False
    if _utils.unit_full(unit.width, util_info[unit.name]):
        return False
    _issue_instr(util_info[unit.name], needs_mem, issue_rec, accept_res)
    accept_res.accepted = True
    return True
def _add_access(instr: HwInstruction, instr_index: int,
                builders: Mapping[object, RegAccQBuilder]) -> None:
    """Append the instruction accesses to the given plan.

    `instr` is the instruction whose register accesses are recorded.
    `instr_index` is the instruction index.
    `builders` are the registry access plan builders, keyed by register.
    Source registers are queued as reads first, then the destination
    register as a write.
    """
    for src_reg in instr.sources:
        builders[src_reg].append(AccessType.READ, instr_index)
    builders[instr.destination].append(AccessType.WRITE, instr_index)
def _add_rd_access(instr: int, builders: Mapping[object, RegAccQBuilder],
                   registers: Iterable[object]) -> None:
    """Queue a read access to every given register.

    `instr` is the index of the reading instruction.
    `builders` are the registry access plan builders, keyed by register.
    `registers` are the registers which will be read-accessed.
    """
    for cur_reg in registers:
        builders[cur_reg].append(AccessType.READ, instr)
def _add_wr_access(instr: int, builder: RegAccQBuilder) -> None:
    """Register the write access of the given instruction.

    `instr` is the index of the writing instruction.
    `builder` is the access plan builder of the destination register.
    """
    builder.append(AccessType.WRITE, instr)
def _build_acc_plan(program: Iterable[Tuple[int, HwInstruction]]) -> Dict[
        object, RegAccessQueue]:
    """Build the registry access plan through the program lifetime.

    `program` is an iterable of (index, instruction) pairs.
    The function returns a mapping from every register the program
    touches to its planned access queue.
    """
    # defaultdict lets _add_access create a builder on first touch.
    plan_builders: typing.DefaultDict[
        object, RegAccQBuilder] = collections.defaultdict(RegAccQBuilder)
    for idx, cur_instr in program:
        _add_access(cur_instr, idx, plan_builders)
    return {cur_reg: cur_builder.create() for
            cur_reg, cur_builder in plan_builders.items()}
def _build_cap_map(inputs: Iterable[UnitModel]) -> Dict[
        object, List[UnitModel]]:
    """Map every capability to the input units supporting it.

    `inputs` are the input processing units.
    The function returns a dictionary from each capability to the list
    of supporting units, in encounter order.
    """
    cap_map: Dict[object, List[UnitModel]] = {}
    for model in inputs:
        for capability in model.capabilities:
            cap_map.setdefault(capability, []).append(model)
    return cap_map
def _calc_unstalled(instructions: Iterable[InstrState]) -> int:
    """Count the number of unstalled instructions.

    `instructions` are the instructions to scan for unstalled ones.
    """
    return sum(1 for cur_instr in instructions if
               cur_instr.stalled == StallState.NO_STALL)
def _chk_data_stall(
        unit_locks: LockInfo, instr_index: object, instr: HwInstruction,
        acc_queues: Mapping[object, RegAccessQueue], reqs_to_clear:
        MutableMapping[object, MutableSequence[object]]) -> StallState:
    """Determine whether the instruction suffers a data stall.

    `unit_locks` are the unit lock information.
    `instr_index` is the index of the instruction to check.
    `instr` is the instruction to check.
    `acc_queues` are the planned access queues for registers.
    `reqs_to_clear` are the requests to be cleared from the access
                    queues.
    The function returns DATA when a required register isn't available,
    otherwise NO_STALL after recording the granted accesses for
    clearing.
    """
    avail_state = _regs_avail(unit_locks, instr_index, instr, acc_queues)
    if avail_state.avail:
        _update_clears(reqs_to_clear, avail_state.regs, instr_index)
        return StallState.NO_STALL
    return StallState.DATA
def _chk_full_stall(
        old_util: object, new_util: object, util_tbl: object) -> None:
    """Raise when the whole processor has stalled.

    `old_util` is the utilization information of the previous clock
               pulse.
    `new_util` is the utilization information of the current clock
               pulse.
    `util_tbl` is the utilization table.
    Identical utilization across two consecutive pulses means no
    instruction moved anywhere; that's a full stall and a StallError is
    raised with the utilization table as the stalled state.
    """
    if new_util != old_util:
        return
    raise StallError(
        f"Processor stalled with utilization ${StallError.STATE_KEY}",
        util_tbl)
def _chk_hazards(old_util: BagValDict[_T, InstrState], new_util:
                 Iterable[Tuple[_T, Iterable[InstrState]]], name_unit_map:
                 Mapping[_T, UnitModel], program: Sequence[HwInstruction],
                 acc_queues: Mapping[object, RegAccessQueue]) -> None:
    """Detect hazards and mark affected instructions as stalled.

    `old_util` is the utilization information of the previous clock
               pulse.
    `new_util` is the utilization information of the current clock
               pulse.
    `name_unit_map` is the name-to-unit mapping.
    `program` is the master instruction list.
    `acc_queues` are the planned access queues for registers.
    The function compares old and new utilization per unit and marks
    stalled instructions according to the identified hazards.
    """
    reqs_to_clear: Dict[object, MutableSequence[object]] = {}
    for unit_name, unit_util in new_util:
        _stall_unit(
            name_unit_map[unit_name].lock_info,
            _TransitionUtil(old_util[unit_name], unit_util), program,
            acc_queues, reqs_to_clear)
    # Accesses granted during this pulse are no longer pending; drop
    # them from the per-register queues.
    for cur_reg, req_lst in reqs_to_clear.items():
        for cur_req in req_lst:
            acc_queues[cur_reg].dequeue(cur_req)
def _chk_avail_regs(avail_regs: MutableSequence[Sequence[object]], acc_queues:
                    Mapping[object, RegAccessQueue], lock: bool, new_regs:
                    Sequence[object], req_params: Iterable[object]) -> bool:
    """Check if the given registers can be accessed.

    `avail_regs` is the list of granted register groups, extended with
                 `new_regs` on success.
    `acc_queues` are the planned access queues for registers.
    `lock` is the locking flag; without a lock the check trivially
           passes and nothing is recorded.
    `new_regs` are the registers whose access is requested.
    `req_params` are the access request parameters.
    """
    if not lock:
        return True
    for cur_reg in new_regs:
        if not acc_queues[cur_reg].can_access(*req_params):
            return False
    avail_regs.append(new_regs)
    return True
def _clr_src_units(instructions: Iterable[_instr_sinks.HostedInstr],
                   util_info: BagValDict[ICaseString, _T]) -> None:
    """Clear moved instructions out of their source units.

    `instructions` are the records of instructions that were moved out
                   of their hosting units, expected in descending
                   index-in-host order so deletions don't shift the
                   positions of entries still to be deleted.
    `util_info` is the unit utilization information.
    """
    for moved in instructions:
        del util_info[moved.host][moved.index_in_host]
def _count_outputs(outputs: Iterable[ICaseString],
                   util_info: BagValDict[ICaseString, InstrState]) -> int:
    """Count the unstalled instructions sitting at output ports.

    `outputs` are all the output unit names.
    `util_info` is the unit utilization information.
    """
    total = 0
    for out_port in outputs:
        total += _calc_unstalled(util_info[out_port])
    return total
def _fill_cp_util(
        processor: ProcessorDesc, program: Sequence[HwInstruction], util_info:
        BagValDict[ICaseString, InstrState], issue_rec: _IssueInfo) -> None:
    """Calculate the utilization of a new clock pulse.

    `processor` is the processor whose unit utilization is filled.
    `program` is the program to execute.
    `util_info` is the unit utilization information to fill.
    `issue_rec` is the issue record.
    In-flight instructions are moved forward first (output sink first,
    then output and internal unit sinks), then new instructions are
    fetched into the input units.
    """
    out_sink = _instr_sinks.OutSink(_get_out_ports(processor))
    unit_sinks = (_instr_sinks.UnitSink(unit, program) for unit in chain(
        processor.out_ports, processor.internal_units))
    mem_busy = _mov_flights(chain([out_sink], unit_sinks), util_info)
    in_models = processor_utils.units.sorted_models(
        chain(processor.in_out_ports, processor.in_ports))
    _fill_inputs(
        _build_cap_map(in_models), program, util_info, mem_busy, issue_rec)
def _fill_inputs(
        cap_unit_map: Mapping[object, Iterable[UnitModel]], program: Sequence[
            HwInstruction], util_info: BagValDict[ICaseString, InstrState],
        mem_busy: bool, issue_rec: _IssueInfo) -> None:
    """Fetch new program instructions into the pipeline.

    `cap_unit_map` is the mapping between capabilities and units.
    `program` is the program whose instructions fill the input units.
    `util_info` is the unit utilization information.
    `mem_busy` is the memory busy flag.
    `issue_rec` is the issue record.
    Issuing stops when the program is exhausted or an instruction can't
    find a free input unit.
    """
    accept_res = _AcceptStatus(mem_busy)
    while issue_rec.entered < len(program) and accept_res.accepted:
        categ = program[issue_rec.entered].categ
        candidates = iter(cap_unit_map.get(categ, []))
        _accept_instr(issue_rec, categ, candidates, util_info, accept_res)
def _fill_unit(unit: IInstrSink, util_info:
               BagValDict[ICaseString, InstrState], mem_busy: bool) -> bool:
    """Fill a unit with instructions from its predecessors.

    `unit` is the destination unit to fill.
    `util_info` is the unit utilization information.
    `mem_busy` is the memory busy flag.
    The function returns a flag indicating if a memory access is
    currently in progress.
    """
    mov_res = unit.fill_unit(util_info, mem_busy)
    # Clear sources in descending index order so earlier deletions don't
    # shift the indices of entries still to be deleted.
    moved = sorted(mov_res.moved,
                   key=lambda hosted: hosted.index_in_host, reverse=True)
    _clr_src_units(moved, util_info)
    return mov_res.mem_used
def _get_out_ports(processor: ProcessorDesc) -> Iterator[ICaseString]:
    """Find all units at the processor output boundary.

    `processor` is the processor whose output ports are sought.
    The function returns an iterator over all unit names at the output
    boundary; in-out ports are models themselves while output ports
    wrap their model.
    """
    boundary_units = chain(
        processor.in_out_ports, (port.model for port in processor.out_ports))
    return (unit.name for unit in boundary_units)
def _issue_instr(instr_lst: MutableSequence[InstrState], mem_access: bool,
                 issue_rec: _IssueInfo, accept_res: _AcceptStatus) -> None:
    """Issue the next instruction to the issue list.

    `instr_lst` is the list of hosted instructions in the accepting
                unit.
    `mem_access` is the hosting unit memory access flag.
    `issue_rec` is the issue record.
    `accept_res` is the instruction acceptance result.
    The issued instruction is the one at the current entered index; the
    index is bumped afterwards and the memory bus is marked busy when
    the hosting unit needs it.
    """
    instr_lst.append(InstrState(issue_rec.entered))
    issue_rec.bump_input()
    if mem_access:
        accept_res.mem_used = True
def _mov_flights(dst_units: Iterable[IInstrSink],
                 util_info: BagValDict[ICaseString, InstrState]) -> bool:
    """Move the in-flight instructions inside the pipeline.

    `dst_units` are the destination processing units, visited in order.
    `util_info` is the unit utilization information.
    The function returns a flag indicating if a memory access is
    currently in progress.
    """
    mem_busy = False
    for sink in dst_units:
        # Once any sink reports a memory access, later sinks see the
        # bus as busy.
        if _fill_unit(sink, util_info, mem_busy):
            mem_busy = True
    return mem_busy
def _regs_avail(
        unit_locks: LockInfo, instr_index: object, instr: HwInstruction,
        acc_queues: Mapping[object, RegAccessQueue]) -> _RegAvailState:
    """Check if all needed registers can be accessed.

    `unit_locks` are the unit lock information.
    `instr_index` is the index of the instruction checked.
    `instr` is the instruction checked.
    `acc_queues` are the planned access queues for registers.
    The function returns the registers availability state: on success
    it carries every register group whose access was granted, on
    failure an empty group list.
    """
    granted: List[Sequence[object]] = []
    checks = (
        (unit_locks.rd_lock, instr.sources, [AccessType.READ, instr_index]),
        (unit_locks.wr_lock, [instr.destination],
         [AccessType.WRITE, instr_index]))
    for lock, regs, req_params in checks:
        if not _chk_avail_regs(granted, acc_queues, lock, regs, req_params):
            return _RegAvailState(False, [])
    return _RegAvailState(True, chain.from_iterable(granted))
def _regs_loaded(old_unit_util: Iterable[InstrState], instr: object) -> bool:
    """Check if the instruction's registers were previously loaded.

    `old_unit_util` is the unit utilization information of the previous
                    clock pulse.
    `instr` is the index of the instruction to check.
    The function yields a truthy value when the instruction appeared in
    this unit last pulse without a data stall (i.e. its registers were
    already granted), and None otherwise.
    """
    hit = next(
        (old_instr for old_instr in old_unit_util if old_instr.instr == instr
         and old_instr.stalled != StallState.DATA), None)
    return typing.cast(bool, hit)
def _run_cycle(program: Sequence[HwInstruction],
               acc_queues: Mapping[object, RegAccessQueue], hw_info: HwSpec,
               util_tbl: MutableSequence[BagValDict[ICaseString, InstrState]],
               issue_rec: _IssueInfo) -> None:
    """Run a single clock cycle.

    `program` is the program being executed.
    `acc_queues` are the planned access queues for registers.
    `hw_info` is the processor information.
    `util_tbl` is the utilization table; the new pulse's utilization is
               appended to it.
    `issue_rec` is the issue record.
    """
    prev_util = util_tbl[-1] if util_tbl else BagValDict()
    # Start from a deep copy so hazard checks can diff old vs. new.
    cur_util = copy.deepcopy(prev_util)
    _fill_cp_util(hw_info.processor_desc, program, cur_util, issue_rec)
    _chk_hazards(prev_util, cur_util.items(), hw_info.name_unit_map, program,
                 acc_queues)
    _chk_full_stall(prev_util, cur_util, util_tbl)
    out_count = _count_outputs(
        _get_out_ports(hw_info.processor_desc), cur_util)
    issue_rec.pump_outputs(out_count)
    util_tbl.append(cur_util)
def _stall_unit(unit_locks: LockInfo, trans_util: _TransitionUtil,
                program: Sequence[HwInstruction],
                acc_queues: Mapping[object, RegAccessQueue], reqs_to_clear:
                MutableMapping[object, MutableSequence[object]]) -> None:
    """Mark instructions in the given unit as stalled as needed.

    `unit_locks` are the unit lock information.
    `trans_util` is the unit utilization transition between the previous
                 and current clock pulses.
    `program` is the master instruction list.
    `acc_queues` are the planned access queues for registers.
    `reqs_to_clear` are the requests to be cleared from the access
                    queues.
    """
    for cur_state in trans_util.new_util:
        if _regs_loaded(trans_util.old_util, cur_state.instr):
            # Registers were already granted last pulse; the only reason
            # the instruction is still here is a structural hazard.
            cur_state.stalled = StallState.STRUCTURAL
        else:
            cur_state.stalled = _chk_data_stall(
                unit_locks, cur_state.instr, program[cur_state.instr],
                acc_queues, reqs_to_clear)
def _update_clears(reqs_to_clear: MutableMapping[object, MutableSequence[
        object]], regs: Iterable[object], instr: object) -> None:
    """Record register accesses to be cleared later.

    `reqs_to_clear` are the requests to be cleared from the access
                    queues, keyed by register.
    `regs` are the registers whose pending access was granted.
    `instr` is the index of the instruction owning the accesses.
    """
    for cur_reg in regs:
        reqs_to_clear.setdefault(cur_reg, []).append(instr)
| lgpl-3.0 |
jokerfr9/DragonsKernel_Kylessopen | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
# wxPython is an optional third-party dependency; abort with a clear
# installation hint when it's missing. (Python 2 raise syntax, matching
# the rest of this script.)
try:
    import wx
except ImportError:
    raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
    # Pixel geometry of the trace view: top margin, bar height, gap
    # between bars, and the height of the event-marker strip drawn
    # along the top edge of a bar.
    Y_OFFSET = 100
    RECT_HEIGHT = 100
    RECT_SPACE = 50
    EVENT_MARKING_WIDTH = 5

    def __init__(self, sched_tracer, title, parent = None, id = -1):
        # Build the frame, register it with the tracer (the tracer calls
        # back into this window), and lay out a scrollable drawing area
        # sized to hold the whole trace interval at the initial zoom.
        wx.Frame.__init__(self, parent, id, title)
        (self.screen_width, self.screen_height) = wx.GetDisplaySize()
        self.screen_width -= 10
        self.screen_height -= 10
        self.zoom = 0.5
        self.scroll_scale = 20
        self.sched_tracer = sched_tracer
        self.sched_tracer.set_root_win(self)
        (self.ts_start, self.ts_end) = sched_tracer.interval()
        self.update_width_virtual()
        self.nr_rects = sched_tracer.nr_rectangles() + 1
        self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
        # whole window panel
        self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
        # scrollable container
        self.scroll = wx.ScrolledWindow(self.panel)
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
        self.scroll.EnableScrolling(True, True)
        self.scroll.SetFocus()
        # scrollable drawing area
        self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
        self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
        self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
        self.scroll.Fit()
        self.Fit()
        self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
        self.txt = None
        self.Show(True)

    def us_to_px(self, val):
        # Timestamp delta (us) -> pixels at the current zoom. NOTE: this
        # file uses Python 2 syntax, so / on ints is integer division.
        return val / (10 ** 3) * self.zoom

    def px_to_us(self, val):
        # Pixels -> timestamp delta (us); inverse of us_to_px.
        return (val / self.zoom) * (10 ** 3)

    def scroll_start(self):
        # Current scroll origin in pixels.
        (x, y) = self.scroll.GetViewStart()
        return (x * self.scroll_scale, y * self.scroll_scale)

    def scroll_start_us(self):
        # Current scroll origin as a timestamp offset (us).
        (x, y) = self.scroll_start()
        return self.px_to_us(x)

    def paint_rectangle_zone(self, nr, color, top_color, start, end):
        # Draw one activity bar in row `nr` covering [start, end]; when
        # `top_color` is given, a thin strip of that color marks events
        # along the top edge and the bar body is shrunk to fit below it.
        offset_px = self.us_to_px(start - self.ts_start)
        width_px = self.us_to_px(end - self.ts_start)
        offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
        width_py = RootFrame.RECT_HEIGHT
        dc = self.dc
        if top_color is not None:
            (r, g, b) = top_color
            top_color = wx.Colour(r, g, b)
            brush = wx.Brush(top_color, wx.SOLID)
            dc.SetBrush(brush)
            dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
            width_py -= RootFrame.EVENT_MARKING_WIDTH
            offset_py += RootFrame.EVENT_MARKING_WIDTH
        (r ,g, b) = color
        color = wx.Colour(r, g, b)
        brush = wx.Brush(color, wx.SOLID)
        dc.SetBrush(brush)
        dc.DrawRectangle(offset_px, offset_py, width_px, width_py)

    def update_rectangles(self, dc, start, end):
        # Ask the tracer to repaint everything in the given window
        # (converted from view-relative offsets to absolute timestamps).
        start += self.ts_start
        end += self.ts_start
        self.sched_tracer.fill_zone(start, end)

    def on_paint(self, event):
        # Repaint only the time range currently scrolled into view.
        dc = wx.PaintDC(self.scroll_panel)
        self.dc = dc
        width = min(self.width_virtual, self.screen_width)
        (x, y) = self.scroll_start()
        start = self.px_to_us(x)
        end = self.px_to_us(x + width)
        self.update_rectangles(dc, start, end)

    def rect_from_ypixel(self, y):
        # Map a y pixel to a row index; -1 when the position falls in
        # the top margin or in the gap between two rows.
        y -= RootFrame.Y_OFFSET
        rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
        height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
        if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
            return -1
        return rect

    def update_summary(self, txt):
        # Replace the summary text shown below the drawing area.
        if self.txt:
            self.txt.Destroy()
        self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))

    def on_mouse_down(self, event):
        # Forward a click to the tracer as (row, absolute timestamp);
        # clicks outside any row are ignored.
        (x, y) = event.GetPositionTuple()
        rect = self.rect_from_ypixel(y)
        if rect == -1:
            return
        t = self.px_to_us(x) + self.ts_start
        self.sched_tracer.mouse_down(rect, t)

    def update_width_virtual(self):
        # The virtual canvas spans the whole trace at the current zoom.
        self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)

    def __zoom(self, x):
        # Re-apply scrollbar geometry after a zoom change, keeping the
        # timestamp `x` at the left edge of the view.
        self.update_width_virtual()
        (xpos, ypos) = self.scroll.GetViewStart()
        xpos = self.us_to_px(x) / self.scroll_scale
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
        self.Refresh()

    def zoom_in(self):
        # Double the zoom, anchored at the current view start.
        x = self.scroll_start_us()
        self.zoom *= 2
        self.__zoom(x)

    def zoom_out(self):
        # Halve the zoom, anchored at the current view start.
        x = self.scroll_start_us()
        self.zoom /= 2
        self.__zoom(x)

    def on_key_press(self, event):
        # +/- zoom in and out; arrow keys scroll one step per press.
        key = event.GetRawKeyCode()
        if key == ord("+"):
            self.zoom_in()
            return
        if key == ord("-"):
            self.zoom_out()
            return
        key = event.GetKeyCode()
        (x, y) = self.scroll.GetViewStart()
        if key == wx.WXK_RIGHT:
            self.scroll.Scroll(x + 1, y)
        elif key == wx.WXK_LEFT:
            self.scroll.Scroll(x - 1, y)
        elif key == wx.WXK_DOWN:
            self.scroll.Scroll(x, y + 1)
        elif key == wx.WXK_UP:
            self.scroll.Scroll(x, y - 1)
| gpl-2.0 |
sorenk/ansible | lib/ansible/modules/packaging/os/portage.py | 43 | 14682 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, William L Thomson Jr
# (c) 2013, Yap Sok Ann
# Written by Yap Sok Ann <sokann@gmail.com>
# Modified by William L. Thomson Jr. <wlt@o-sinc.com>
# Based on apt module written by Matthew Williams <matthew@flowroute.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata: community-supported, preview status.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: portage
short_description: Package manager for Gentoo
description:
- Manages Gentoo packages
version_added: "1.6"
options:
package:
description:
- Package atom or set, e.g. C(sys-apps/foo) or C(>foo-2.13) or C(@world)
state:
description:
- State of the package atom
default: "present"
choices: [ "present", "installed", "emerged", "absent", "removed", "unmerged", "latest" ]
update:
description:
- Update packages to the best version available (--update)
type: bool
default: 'no'
deep:
description:
- Consider the entire dependency tree of packages (--deep)
type: bool
default: 'no'
newuse:
description:
- Include installed packages where USE flags have changed (--newuse)
type: bool
default: 'no'
changed_use:
description:
- Include installed packages where USE flags have changed, except when
- flags that the user has not enabled are added or removed
- (--changed-use)
type: bool
default: 'no'
version_added: 1.8
oneshot:
description:
- Do not add the packages to the world file (--oneshot)
type: bool
default: 'no'
noreplace:
description:
- Do not re-emerge installed packages (--noreplace)
type: bool
default: 'no'
nodeps:
description:
- Only merge packages but not their dependencies (--nodeps)
type: bool
default: 'no'
onlydeps:
description:
- Only merge packages' dependencies but not the packages (--onlydeps)
type: bool
default: 'no'
depclean:
description:
- Remove packages not needed by explicitly merged packages (--depclean)
- If no package is specified, clean up the world's dependencies
- Otherwise, --depclean serves as a dependency aware version of --unmerge
type: bool
default: 'no'
quiet:
description:
- Run emerge in quiet mode (--quiet)
type: bool
default: 'no'
verbose:
description:
- Run emerge in verbose mode (--verbose)
type: bool
default: 'no'
sync:
description:
- Sync package repositories first
- If yes, perform "emerge --sync"
- If web, perform "emerge-webrsync"
choices: [ "web", "yes", "no" ]
getbinpkg:
description:
- Prefer packages specified at PORTAGE_BINHOST in make.conf
type: bool
default: 'no'
usepkgonly:
description:
- Merge only binaries (no compiling). This sets getbinpkg=yes.
type: bool
default: 'no'
keepgoing:
description:
- Continue as much as possible after an error.
type: bool
default: 'no'
version_added: 2.3
jobs:
description:
- Specifies the number of packages to build simultaneously.
- "Since version 2.6: Value of 0 or False resets any previously added"
- --jobs setting values
version_added: 2.3
loadavg:
description:
- Specifies that no new builds should be started if there are
- other builds running and the load average is at least LOAD
- "Since version 2.6: Value of 0 or False resets any previously added"
- --load-average setting values
version_added: 2.3
quietbuild:
description:
- Redirect all build output to logs alone, and do not display it
- on stdout (--quiet-build)
type: bool
default: 'no'
version_added: 2.6
quietfail:
description:
- Suppresses display of the build log on stdout (--quiet-fail)
- Only the die message and the path of the build log will be
- displayed on stdout.
type: bool
default: 'no'
version_added: 2.6
requirements: [ gentoolkit ]
author:
- "William L Thomson Jr (@wltjr)"
- "Yap Sok Ann (@sayap)"
- "Andrew Udvare"
'''
EXAMPLES = '''
# Make sure package foo is installed
- portage:
package: foo
state: present
# Make sure package foo is not installed
- portage:
package: foo
state: absent
# Update package foo to the "latest" version ( os specific alternative to latest )
- portage:
package: foo
update: yes
# Install package foo using PORTAGE_BINHOST setup
- portage:
package: foo
getbinpkg: yes
# Re-install world from binary packages only and do not allow any compiling
- portage:
package: '@world'
usepkgonly: yes
# Sync repositories and update world
- portage:
package: '@world'
update: yes
deep: yes
sync: yes
# Remove unneeded packages
- portage:
depclean: yes
# Remove package foo if it is not explicitly needed
- portage:
package: foo
state: absent
depclean: yes
'''
import os
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
def query_package(module, package, action):
    """Return whether *package* is present.

    Package sets start with '@' and are looked up in the world-sets
    file; everything else is treated as an atom and checked via equery.
    """
    handler = query_set if package.startswith('@') else query_atom
    return handler(module, package, action)
def query_atom(module, atom, action):
    """Return True when equery reports *atom* as installed.

    `action` is unused here; it is kept for signature parity with
    query_set (both are dispatched through query_package).
    """
    rc, dummy_out, dummy_err = module.run_command(
        '%s list %s' % (module.equery_path, atom))
    return rc == 0
def query_set(module, set, action):
    """Return whether the package set *set* is present.

    Built-in system sets are never reported as present (so emerge is
    always attempted for them), and trying to unmerge one is a fatal
    error. Custom sets are searched for in the world_sets file.
    """
    system_sets = [
        '@live-rebuild',
        '@module-rebuild',
        '@preserved-rebuild',
        '@security',
        '@selected',
        '@system',
        '@world',
        '@x11-module-rebuild',
    ]
    if set in system_sets:
        if action == 'unmerge':
            module.fail_json(msg='set %s cannot be removed' % set)
        return False
    world_sets_path = '/var/lib/portage/world_sets'
    if not os.path.exists(world_sets_path):
        return False
    # NOTE(review): grep interprets the set name as a regex here; set
    # names with regex metacharacters would match loosely.
    rc, dummy_out, dummy_err = module.run_command(
        'grep %s %s' % (set, world_sets_path))
    return rc == 0
def sync_repositories(module, webrsync=False):
    """Sync package repositories via emerge --sync or emerge-webrsync.

    Fails the module when the sync command exits non-zero; check mode
    is rejected up front since syncing always mutates the system.
    """
    if module.check_mode:
        module.exit_json(msg='check mode not supported by sync')
    if webrsync:
        cmd = '%s --quiet' % module.get_bin_path(
            'emerge-webrsync', required=True)
    else:
        cmd = '%s --sync --quiet --ask=n' % module.emerge_path
    rc, dummy_out, dummy_err = module.run_command(cmd)
    if rc != 0:
        module.fail_json(msg='could not sync package repositories')
# Note: In the 3 functions below, equery is done one-by-one, but emerge is done
# in one go. If that is not desirable, split the packages into multiple tasks
# instead of joining them together with comma.
def emerge_packages(module, packages):
    """Run emerge command against given list of atoms.

    Always terminates the module through exit_json/fail_json. In check
    mode the emerge runs with --pretend (added by run_emerge) and the
    pretend output decides the reported change status.
    """
    p = module.params

    # Unless an update-style run was requested, skip emerge entirely
    # when every requested package is already installed.
    if not (p['update'] or p['noreplace'] or p['state'] == 'latest'):
        for package in packages:
            if not query_package(module, package, 'emerge'):
                break
        else:
            module.exit_json(changed=False, msg='Packages already present.')
        if module.check_mode:
            module.exit_json(changed=True, msg='Packages would be installed.')

    args = []
    # Boolean module options mapped to their emerge flags.
    emerge_flags = {
        'update': '--update',
        'deep': '--deep',
        'newuse': '--newuse',
        'changed_use': '--changed-use',
        'oneshot': '--oneshot',
        'noreplace': '--noreplace',
        'nodeps': '--nodeps',
        'onlydeps': '--onlydeps',
        'quiet': '--quiet',
        'verbose': '--verbose',
        'getbinpkg': '--getbinpkg',
        'usepkgonly': '--usepkgonly',
        'usepkg': '--usepkg',
        'keepgoing': '--keep-going',
        'quietbuild': '--quiet-build',
        'quietfail': '--quiet-fail',
    }
    for flag, arg in emerge_flags.items():
        if p[flag]:
            args.append(arg)

    # state=latest is implemented as an implicit --update.
    if p['state'] and p['state'] == 'latest':
        args.append("--update")

    if p['usepkg'] and p['usepkgonly']:
        module.fail_json(msg='Use only one of usepkg, usepkgonly')

    # Valued options: None omits the flag entirely, a falsy value (0 or
    # 0.0) emits the bare flag, anything else emits flag + value.
    emerge_flags = {
        'jobs': '--jobs',
        'loadavg': '--load-average',
    }
    for flag, arg in emerge_flags.items():
        flag_val = p[flag]
        if flag_val is None:
            """Fallback to default: don't use this argument at all."""
            continue
        if not flag_val:
            """If the value is 0 or 0.0: add the flag, but not the value."""
            args.append(arg)
            continue
        """Add the --flag=value pair."""
        args.extend((arg, to_native(flag_val)))

    cmd, (rc, out, err) = run_emerge(module, packages, *args)
    if rc != 0:
        module.fail_json(
            cmd=cmd, rc=rc, stdout=out, stderr=err,
            msg='Packages not installed.',
        )

    # Check for SSH error with PORTAGE_BINHOST, since rc is still 0 despite
    # this error
    if (p['usepkgonly'] or p['getbinpkg']) \
            and 'Permission denied (publickey).' in err:
        module.fail_json(
            cmd=cmd, rc=rc, stdout=out, stderr=err,
            msg='Please check your PORTAGE_BINHOST configuration in make.conf '
                'and your SSH authorized_keys file',
        )

    # Scan emerge's output to tell an actual (or pretended) merge from a
    # no-op run; the for/else sets the "nothing happened" outcome.
    changed = True
    for line in out.splitlines():
        if re.match(r'(?:>+) Emerging (?:binary )?\(1 of', line):
            msg = 'Packages installed.'
            break
        elif module.check_mode and re.match(r'\[(binary|ebuild)', line):
            msg = 'Packages would be installed.'
            break
    else:
        changed = False
        msg = 'No packages installed.'

    module.exit_json(
        changed=changed, cmd=cmd, rc=rc, stdout=out, stderr=err,
        msg=msg,
    )
def unmerge_packages(module, packages):
    """Unmerge the given packages, exiting the module with the result.

    When none of the packages is installed the module exits unchanged
    without running emerge at all.
    """
    p = module.params
    if not any(query_package(module, package, 'unmerge')
               for package in packages):
        module.exit_json(changed=False, msg='Packages already absent.')
    flags = ['--unmerge']
    flags.extend('--%s' % flag for flag in ('quiet', 'verbose') if p[flag])
    cmd, (rc, out, err) = run_emerge(module, packages, *flags)
    if rc != 0:
        module.fail_json(cmd=cmd, rc=rc, stdout=out, stderr=err,
                         msg='Packages not removed.')
    module.exit_json(changed=True, cmd=cmd, rc=rc, stdout=out, stderr=err,
                     msg='Packages removed.')
def cleanup_packages(module, packages):
    """Run ``emerge --depclean``, optionally restricted to *packages*.

    With an empty package list, a global depclean is performed.  The
    changed flag is derived from emerge's "Number removed:" summary line.
    """
    params = module.params

    # With an explicit package list, bail out early if none of them is
    # installed (any() short-circuits like the original for/else loop).
    if packages and not any(query_package(module, pkg, 'unmerge')
                            for pkg in packages):
        module.exit_json(changed=False, msg='Packages already absent.')

    args = ['--depclean']
    args.extend('--%s' % flag for flag in ('quiet', 'verbose') if params[flag])

    cmd, (rc, out, err) = run_emerge(module, packages, *args)
    if rc != 0:
        module.fail_json(cmd=cmd, rc=rc, stdout=out, stderr=err)

    # emerge prints "Number removed:       N"; if the line repeats, the
    # last occurrence wins, matching the original behavior.
    removed = 0
    for line in out.splitlines():
        if line.startswith('Number removed:'):
            removed = int(line.split(':')[1].strip())

    module.exit_json(
        changed=removed > 0, cmd=cmd, rc=rc, stdout=out, stderr=err,
        msg='Depclean completed.',
    )
def run_emerge(module, packages, *args):
    """Assemble and execute an emerge command line.

    Returns a tuple ``(cmd, (rc, stdout, stderr))`` where *cmd* is the
    argv list that was executed.
    """
    emerge_args = list(args)
    emerge_args.append('--ask=n')  # never prompt interactively
    if module.check_mode:
        # --pretend makes emerge report what it would do without doing it
        emerge_args.append('--pretend')
    cmd = [module.emerge_path] + emerge_args + packages
    return cmd, module.run_command(cmd)
# 'state' values that mean "ensure the package is installed"
# ('latest' additionally upgrades already-installed packages).
portage_present_states = ['present', 'emerged', 'installed', 'latest']
# 'state' values that mean "ensure the package is removed".
portage_absent_states = ['absent', 'unmerged', 'removed']
def main():
    """Entry point: declare the module interface, then dispatch to repository
    sync, depclean, package installation, or package removal."""
    module = AnsibleModule(
        argument_spec=dict(
            package=dict(default=None, aliases=['name'], type='list'),
            state=dict(
                default=portage_present_states[0],
                choices=portage_present_states + portage_absent_states,
            ),
            update=dict(default=False, type='bool'),
            deep=dict(default=False, type='bool'),
            newuse=dict(default=False, type='bool'),
            changed_use=dict(default=False, type='bool'),
            oneshot=dict(default=False, type='bool'),
            noreplace=dict(default=False, type='bool'),
            nodeps=dict(default=False, type='bool'),
            onlydeps=dict(default=False, type='bool'),
            depclean=dict(default=False, type='bool'),
            quiet=dict(default=False, type='bool'),
            verbose=dict(default=False, type='bool'),
            sync=dict(default=None, choices=['yes', 'web', 'no']),
            getbinpkg=dict(default=False, type='bool'),
            usepkgonly=dict(default=False, type='bool'),
            usepkg=dict(default=False, type='bool'),
            keepgoing=dict(default=False, type='bool'),
            jobs=dict(default=None, type='int'),
            loadavg=dict(default=None, type='float'),
            quietbuild=dict(default=False, type='bool'),
            quietfail=dict(default=False, type='bool'),
        ),
        required_one_of=[['package', 'sync', 'depclean']],
        mutually_exclusive=[
            ['nodeps', 'onlydeps'],
            ['quiet', 'verbose'],
            ['quietbuild', 'verbose'],
            ['quietfail', 'verbose'],
        ],
        supports_check_mode=True,
    )
    # Resolve tool paths up front so we fail early if portage is missing.
    module.emerge_path = module.get_bin_path('emerge', required=True)
    module.equery_path = module.get_bin_path('equery', required=True)
    p = module.params
    # 'yes' and 'web' both trigger a repository sync before any package
    # work; when no packages were requested, syncing is the whole job.
    if p['sync'] and p['sync'].strip() != 'no':
        sync_repositories(module, webrsync=(p['sync'] == 'web'))
        if not p['package']:
            module.exit_json(msg='Sync successfully finished.')
    packages = []
    if p['package']:
        packages.extend(p['package'])
    if p['depclean']:
        # Depclean with an explicit package list only makes sense for
        # removal-style states.
        if packages and p['state'] not in portage_absent_states:
            module.fail_json(
                msg='Depclean can only be used with package when the state is '
                    'one of: %s' % portage_absent_states,
            )
        cleanup_packages(module, packages)
    elif p['state'] in portage_present_states:
        emerge_packages(module, packages)
    elif p['state'] in portage_absent_states:
        unmerge_packages(module, packages)
if __name__ == '__main__':
main()
| gpl-3.0 |
mstojcevich/pyFlash | robot.py | 2 | 3695 | import wpilib
from subsystem.drivetrain import DriveTrain
from handlers.autonomous import AutonHandler
from threading import Timer
from subsystem.lifter import Lifter
from oi import OI
TEST_BENCH = False # Whether we're using the test chassis
class FlashRobot(wpilib.IterativeRobot):
    """Main robot class: drives the chassis (and, on the real robot, the
    lifter subsystem) and uses the navX gyro to hold heading in teleop."""
    def robotInit(self):
        """
        Used as a constructor
        """
        '''
        Hehehehe 'stack' overflow
        def stack_totes():
            return stack_totes()
        '''
        '''
        Stacking mechanism
        stack = [0, 1, 2]
        stack.append(tote)
        '''
        # Initialize Subsystems
        self.drivetrain = DriveTrain(self, testBench=TEST_BENCH)
        self.drivetrain.zero()
        wpilib.SmartDashboard.putData(self.drivetrain)
        if not TEST_BENCH:
            # The test chassis has no lifter hardware attached.
            self.lifter = Lifter(self)
            wpilib.SmartDashboard.putData(self.lifter)
        self.oi = OI(self) # This line must be after the subsystems are initialized
        self.autonHandler = AutonHandler(self)
        self.mainDriverStick = self.oi.driver_joystick
        self.copilotStick = self.oi.copilot_joystick
        # True while the driver commanded a turn on the previous loop, so
        # the gyro can be re-zeroed shortly after a turn ends.
        self.wasTurning = False
    def autonomousInit(self):
        # Start the scripted autonomous routine, then back up at full
        # power for two seconds.  NOTE(review): Timer.delay blocks inside
        # autonomousInit instead of running periodically -- confirm intended.
        self.autonHandler.start()
        self.drivetrain.drive.drive(-1, 0.0)
        wpilib.Timer.delay(2.0)
        self.drivetrain.drive.drive(0, 0)
    def teleopInit(self):
        # Stop any still-running autonomous behavior when teleop begins.
        self.autonHandler.end()
    def log(self):
        # Push drivetrain telemetry.
        self.drivetrain.log()
    def autonomousPeriodic(self):
        self.log()
    def teleopPeriodic(self):
        # NOTE(review): operatorControl() runs its own while-loop until
        # teleop ends, so this "periodic" callback effectively blocks for
        # the whole teleop period -- confirm this is intended.
        self.operatorControl()
        self.log()
    def operatorControl(self):
        """Runs the motors with arcade steering."""
        self.drivetrain.drive.setSafetyEnabled(True)
        while self.isOperatorControl() and self.isEnabled():
            # Float value, between -1 and 1, representing a forced turn amount. Used for gyro.
            forcedTurn = None
            if abs(self.mainDriverStick.getZ()) > 0.05: # We're turning, 0.05 is a deadzone
                self.wasTurning = True
            else:
                # Driver is not turning: hold heading with the gyro.
                if self.drivetrain.navX is not None:
                    angle = self.drivetrain.navX.getYaw()
                    angleDiff = (angle + 180) % 360 - 180 # How far the angle is from 0 TODO verify my math
                    if abs(angleDiff) > 10: # TODO check if 10 is a good deadzone
                        kp = 0.03 # Proportional constant for how much to turn based on angle offset
                        # NOTE(review): uses raw `angle`, not the wrapped
                        # `angleDiff`; near +/-180 degrees this commands a
                        # very large correction -- confirm intended.
                        forcedTurn = -angle*kp
                    else:
                        forcedTurn = None # They were in the deadzone, and gyro is center, so force no turn at all
            if abs(self.mainDriverStick.getZ()) < 0.05 and self.wasTurning: # We were turning, now we've stopped
                self.wasTurning = False
                Timer(0.75, lambda: self.drivetrain.navX.zero()).start() # Zero the gyro in 0.75 seconds. Need to tune the time.
            if forcedTurn is not None:
                self.drivetrain.arcadeDrive(self.mainDriverStick, rotateAxis=2, invertTurn=True, rotateValue=forcedTurn) # 2 is horizontal on the right stick
            else:
                self.drivetrain.arcadeDrive(self.mainDriverStick, invertTurn=True, rotateAxis=2) # 2 is horizontal on the right stick
            if not TEST_BENCH:
                self.drivetrain.arcadeStrafe(self.mainDriverStick)
                self.lifter.arcadeDrive(self.copilotStick)
            wpilib.Timer.delay(.005) # give time for the motor to update
if __name__ == "__main__":
import codecs
import sys
sys.stdout = codecs.getwriter("utf-8")(sys.stdout.detach())
wpilib.run(FlashRobot)
| mit |
deepesch/scikit-learn | sklearn/datasets/svmlight_format.py | 114 | 15826 | """This module implements a loader and dumper for the svmlight format
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable to
predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
"""
# Authors: Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from contextlib import closing
import io
import os.path
import numpy as np
import scipy.sparse as sp
from ._svmlight_format import _load_svmlight_file
from .. import __version__
from ..externals import six
from ..externals.six import u, b
from ..externals.six.moves import range, zip
from ..utils import check_array
from ..utils.fixes import frombuffer_empty
def load_svmlight_file(f, n_features=None, dtype=np.float64,
                       multilabel=False, zero_based="auto", query_id=False):
    """Load a dataset in svmlight / libsvm format into a sparse CSR matrix.

    The svmlight format is text based with one sample per line; zero
    valued features are not stored, which makes it suitable for sparse
    datasets.  The first element of each line holds the target value.

    Parsing text is expensive: when loading the same file repeatedly,
    consider wrapping this loader with joblib.Memory.cache to reuse a
    memmapped copy of the parsed CSR result.

    Pairwise preference constraints ("qid" entries) are ignored unless
    query_id is set to True, in which case the per-sample query ids are
    returned as an extra array.

    Parameters
    ----------
    f : {str, file-like, int}
        (Path to) a file to load.  Paths ending in ".gz" or ".bz2" are
        decompressed on the fly; an integer is treated as a file
        descriptor.  File-likes must be opened in binary mode and are not
        closed by this function.

    n_features : int or None
        The number of features to use.  If None, it will be inferred.
        Pass it explicitly when loading slices of a bigger dataset whose
        subsets might not exercise every feature.

    dtype : numpy data type, default np.float64
        Data type of the output numpy arrays ``X`` and ``y``.

    multilabel : boolean, optional, default False
        Samples may have several labels each (see
        http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)

    zero_based : boolean or "auto", optional, default "auto"
        Whether column indices in f are zero-based (True) or one-based
        (False); one-based indices are shifted to zero-based.  "auto"
        applies a heuristic check on the file contents and is always safe.

    query_id : boolean, default False
        If True, will also return the query_id array.

    Returns
    -------
    X : scipy.sparse matrix of shape (n_samples, n_features)

    y : ndarray of shape (n_samples,), or, in the multilabel case, a list
        of tuples of length n_samples.

    query_id : array of shape (n_samples,)
        Only returned when query_id is set to True.

    See also
    --------
    load_svmlight_files : similar function for loading multiple files in
        this format, enforcing the same number of features/columns on all
        of them.
    """
    # Delegate to the multi-file loader with a one-element file list and
    # flatten its [X, y(, query_id)] result into a tuple.
    loaded = load_svmlight_files([f], n_features=n_features, dtype=dtype,
                                 multilabel=multilabel,
                                 zero_based=zero_based, query_id=query_id)
    return tuple(loaded)
def _gen_open(f):
    """Open *f* for binary reading.

    *f* may be a path (``.gz``/``.bz2`` paths are decompressed on the
    fly) or an integer file descriptor (left open on close).  Any other
    type raises TypeError; file-like objects are handled by the caller.
    """
    if isinstance(f, int):  # file descriptor
        return io.open(f, "rb", closefd=False)
    if not isinstance(f, six.string_types):
        raise TypeError("expected {str, int, file-like}, got %s" % type(f))

    ext = os.path.splitext(f)[1]
    if ext == ".gz":
        import gzip
        return gzip.open(f, "rb")
    if ext == ".bz2":
        from bz2 import BZ2File
        return BZ2File(f, "rb")
    return open(f, "rb")
def _open_and_load(f, dtype, multilabel, zero_based, query_id):
    """Parse one svmlight source and convert its raw buffers to numpy arrays.

    Returns ``(data, indices, indptr, labels, query)`` ready for CSR
    assembly by the caller.
    """
    if hasattr(f, "read"):
        parsed = _load_svmlight_file(f, dtype, multilabel, zero_based,
                                     query_id)
    else:
        # XXX remove closing when Python 2.7+/3.1+ required
        with closing(_gen_open(f)) as handle:
            parsed = _load_svmlight_file(handle, dtype, multilabel,
                                         zero_based, query_id)
    actual_dtype, data, ind, indptr, labels, query = parsed

    # convert from array.array, give data the right dtype
    if not multilabel:
        labels = frombuffer_empty(labels, np.float64)
    data = frombuffer_empty(data, actual_dtype)
    indices = frombuffer_empty(ind, np.intc)
    indptr = np.frombuffer(indptr, dtype=np.intc)   # never empty
    query = frombuffer_empty(query, np.intc)

    data = np.asarray(data, dtype=dtype)  # no-op for float{32,64}
    return data, indices, indptr, labels, query
def load_svmlight_files(files, n_features=None, dtype=np.float64,
                        multilabel=False, zero_based="auto", query_id=False):
    """Load a dataset from multiple files in SVMlight format.

    Equivalent to mapping load_svmlight_file over *files*, except that
    all resulting matrices are forced to share the same number of
    columns and the results are concatenated into one flat list.

    Pairwise preference constraints ("qid" entries) are ignored unless
    query_id is set to True.

    Parameters
    ----------
    files : iterable over {str, file-like, int}
        (Paths of) files to load; see load_svmlight_file for the
        accepted forms.  File-likes and descriptors are not closed.

    n_features: int or None
        The number of features to use.  If None, it is inferred from the
        maximum column index occurring in any of the files.  A value
        smaller than the actual number of features raises ValueError.

    dtype : numpy data type, default np.float64
        Data type of the output numpy arrays ``X`` and ``y``.

    multilabel: boolean, optional
        Samples may have several labels each (see
        http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)

    zero_based: boolean or "auto", optional
        Whether column indices in the files are zero-based (True) or
        one-based (False); see load_svmlight_file.

    query_id: boolean, defaults to False
        If True, will return the query_id array for each file.

    Returns
    -------
    [X1, y1, ..., Xn, yn] -- or [X1, y1, q1, ..., Xn, yn, qn] when
    query_id is True -- where (Xi, yi[, qi]) is the result for files[i].

    Notes
    -----
    When fitting a model on X_train and evaluating against X_test, both
    must have the same number of features; loading them together with
    this function guarantees that.

    See also
    --------
    load_svmlight_file
    """
    parsed = [_open_and_load(f, dtype, multilabel, bool(zero_based),
                             bool(query_id))
              for f in files]

    # Shift one-based column indices down to zero-based.  With "auto" we
    # only shift when every file's smallest index is strictly positive.
    if (zero_based is False
            or zero_based == "auto" and all(np.min(chunk[1]) > 0
                                            for chunk in parsed)):
        for chunk in parsed:
            indices = chunk[1]
            indices -= 1   # in-place: the tuple slot cannot be reassigned

    inferred = max(chunk[1].max() for chunk in parsed) + 1
    if n_features is None:
        n_features = inferred
    elif n_features < inferred:
        raise ValueError("n_features was set to {},"
                         " but input file contains {} features"
                         .format(n_features, inferred))

    result = []
    for data, indices, indptr, y, query_values in parsed:
        n_samples = indptr.shape[0] - 1
        X = sp.csr_matrix((data, indices, indptr), (n_samples, n_features))
        X.sort_indices()
        result.extend((X, y))
        if query_id:
            result.append(query_values)
    return result
def _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id):
    """Write X/y (and optional query ids) to the binary file object *f*
    in svmlight format; *one_based* shifts emitted column indices by one."""
    is_sp = int(hasattr(X, "tocsr"))  # sparse input: walk CSR rows directly
    # Integer dtypes print as %d; floats with 16 significant digits so
    # float64 values round-trip through the text format.
    if X.dtype.kind == 'i':
        value_pattern = u("%d:%d")
    else:
        value_pattern = u("%d:%.16g")
    if y.dtype.kind == 'i':
        label_pattern = u("%d")
    else:
        label_pattern = u("%.16g")
    # Line layout: "<label>[ qid:<id>] <idx:val> <idx:val> ...\n"
    line_pattern = u("%s")
    if query_id is not None:
        line_pattern += u(" qid:%d")
    line_pattern += u(" %s\n")
    if comment:
        # Header block: provenance, index base, then the user comment
        # with every line prefixed by "# ".
        f.write(b("# Generated by dump_svmlight_file from scikit-learn %s\n"
                  % __version__))
        f.write(b("# Column indices are %s-based\n"
                  % ["zero", "one"][one_based]))
        f.write(b("#\n"))
        f.writelines(b("# %s\n" % line) for line in comment.splitlines())
    for i in range(X.shape[0]):
        if is_sp:
            span = slice(X.indptr[i], X.indptr[i + 1])
            row = zip(X.indices[span], X.data[span])
        else:
            # Dense row: emit only the nonzero entries.
            nz = X[i] != 0
            row = zip(np.where(nz)[0], X[i, nz])
        s = " ".join(value_pattern % (j + one_based, x) for j, x in row)
        if multilabel:
            # Multilabel target: comma-separated indices of nonzero labels.
            nz_labels = np.where(y[i] != 0)[0]
            labels_str = ",".join(label_pattern % j for j in nz_labels)
        else:
            labels_str = label_pattern % y[i]
        if query_id is not None:
            feat = (labels_str, query_id[i], s)
        else:
            feat = (labels_str, s)
        f.write((line_pattern % feat).encode('ascii'))
def dump_svmlight_file(X, y, f, zero_based=True, comment=None, query_id=None,
                       multilabel=False):
    """Dump the dataset in svmlight / libsvm file format.

    The svmlight format is text based with one sample per line; zero
    valued features are not stored.  The first element of each line
    holds the target value.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        Training vectors, where n_samples is the number of samples and
        n_features is the number of features.

    y : array-like, shape = [n_samples]
        Target values.

    f : string or file-like in binary mode
        If string, specifies the path that will contain the data.
        If file-like, data will be written to f, which must be opened in
        binary mode.

    zero_based : boolean, optional
        Whether column indices should be written zero-based (True) or
        one-based (False).

    comment : string, optional
        Comment inserted at the top of the file, preceded by one that
        identifies the file as dumped by scikit-learn.  Either a Unicode
        string (stored as UTF-8) or an ASCII byte string.  Note that not
        all tools grok comments in SVMlight files.

    query_id : array-like, shape = [n_samples]
        Array containing pairwise preference constraints (qid in
        svmlight format).

    multilabel: boolean, optional
        Samples may have several labels each (see
        http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
    """
    if comment is not None:
        # Byte-string comments must be pure ASCII (decode just to raise);
        # Unicode comments are stored encoded as UTF-8.
        if isinstance(comment, bytes):
            comment.decode("ascii")  # just for the exception
        else:
            comment = comment.encode("utf-8")
        if six.b("\0") in comment:
            raise ValueError("comment string contains NUL byte")

    y = np.asarray(y)
    if y.ndim != 1 and not multilabel:
        raise ValueError("expected y of shape (n_samples,), got %r"
                         % (y.shape,))

    Xval = check_array(X, accept_sparse='csr')
    if Xval.shape[0] != y.shape[0]:
        raise ValueError("X.shape[0] and y.shape[0] should be the same, got"
                         " %r and %r instead." % (Xval.shape[0], y.shape[0]))

    # CSR matrices with unsorted indices caused issues (#1501): make sure
    # the matrix we write has sorted indices, without mutating the
    # caller's X.  If check_array already produced a private copy we may
    # sort that copy in place; otherwise take a sorted copy.
    if Xval is not X:
        X = Xval
        if hasattr(X, "sort_indices"):
            X.sort_indices()
    elif hasattr(Xval, "sorted_indices"):
        X = Xval.sorted_indices()
    else:
        X = Xval

    if query_id is not None:
        query_id = np.asarray(query_id)
        if query_id.shape[0] != y.shape[0]:
            raise ValueError("expected query_id of shape (n_samples,), got %r"
                             % (query_id.shape,))

    one_based = not zero_based
    if hasattr(f, "write"):
        _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
    else:
        with open(f, "wb") as fh:
            _dump_svmlight(X, y, fh, multilabel, one_based, comment, query_id)
| bsd-3-clause |
glneo/gnuradio-davisaf | grc/base/ParseXML.py | 35 | 3369 | """
Copyright 2008 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
from lxml import etree
from . import odict
class XMLSyntaxError(Exception):
    """Raised when XML parsing or DTD validation fails; wraps the lxml
    error log and renders its error entries one per line."""

    def __init__(self, error_log):
        self._error_log = error_log

    def __str__(self):
        errors = self._error_log.filter_from_errors()
        return '\n'.join(str(error) for error in errors)
def validate_dtd(xml_file, dtd_file=None):
    """
    Validate an xml file against its dtd.
    @param xml_file the xml file
    @param dtd_file the optional dtd file
    @throws Exception validation fails
    """
    # Without an explicit dtd file, let lxml validate against the DTD
    # referenced inside the document itself.
    parser = etree.XMLParser(dtd_validation=not dtd_file)
    xml = etree.parse(xml_file, parser=parser)
    if parser.error_log:
        raise XMLSyntaxError(parser.error_log)
    if not dtd_file:
        return
    # Explicit dtd file: validate the parsed tree against it.
    dtd = etree.DTD(dtd_file)
    if not dtd.validate(xml.getroot()):
        raise XMLSyntaxError(dtd.error_log)
def from_file(xml_file):
    """
    Create nested data from an xml file using the from xml helper.
    @param xml_file the xml file path
    @return the nested data
    """
    root = etree.parse(xml_file).getroot()
    return _from_file(root)
def _from_file(xml):
    """
    Recursivly parse the xml tree into nested data format.
    @param xml the xml tree
    @return the nested data
    """
    tag = xml.tag
    if not len(xml):
        return odict({tag: xml.text or ''}) #store empty tags (text is None) as empty string
    nested_data = odict()
    for elem in xml:
        # each recursive call yields a single-entry odict: {tag: value}
        # (.items()[0] and has_key below are Python-2-only idioms)
        key, value = _from_file(elem).items()[0]
        # collect values of repeated child tags into a list
        if nested_data.has_key(key): nested_data[key].append(value)
        else: nested_data[key] = [value]
    #delistify if the length of values is 1
    for key, values in nested_data.iteritems():
        if len(values) == 1: nested_data[key] = values[0]
    return odict({tag: nested_data})
def to_file(nested_data, xml_file):
    """
    Write the nested data to an xml file using the to xml helper.
    @param nested_data the nested data
    @param xml_file the xml file path
    """
    xml = _to_file(nested_data)[0]
    xml_text = etree.tostring(xml, xml_declaration=True, pretty_print=True)
    # Fix: the file handle was previously opened and never closed; use a
    # context manager so it is flushed and closed deterministically.
    with open(xml_file, 'w') as xml_fp:
        xml_fp.write(xml_text)
def _to_file(nested_data):
    """
    Recursivly parse the nested data into xml tree format.
    @param nested_data the nested data
    @return the xml tree filled with child nodes
    """
    nodes = list()
    for key, values in nested_data.iteritems():
        #listify the values if not a list
        if not isinstance(values, (list, set, tuple)):
            values = [values]
        for value in values:
            node = etree.Element(key)
            # string leaves become element text; nested dicts recurse
            # into child elements (str/unicode check is Python-2-only)
            if isinstance(value, (str, unicode)): node.text = value
            else: node.extend(_to_file(value))
            nodes.append(node)
    return nodes
if __name__ == '__main__':
"""Use the main method to test parse xml's functions."""
pass
| gpl-3.0 |
bhargav2408/python-for-android | python3-alpha/python3-src/Lib/encodings/cp1253.py | 272 | 13094 | """ Python Character Mapping Codec cp1253 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1253.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless codec: both directions are single charmap table lookups.
    def encode(self,input,errors='strict'):
        """Encode *input* to cp1253 bytes via the module encoding table."""
        return codecs.charmap_encode(input,errors,encoding_table)
    def decode(self,input,errors='strict'):
        """Decode cp1253 bytes in *input* via the module decoding table."""
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        """Encode a chunk; charmap codecs keep no state, so *final* is unused."""
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        """Decode a chunk; charmap codecs keep no state, so *final* is unused."""
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # encode() is inherited from Codec; no stream-specific state needed.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # decode() is inherited from Codec; no stream-specific state needed.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo record used to register the cp1253 codec."""
    codec = Codec()
    return codecs.CodecInfo(
        name='cp1253',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\u20ac' # 0x80 -> EURO SIGN
'\ufffe' # 0x81 -> UNDEFINED
'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
'\u2020' # 0x86 -> DAGGER
'\u2021' # 0x87 -> DOUBLE DAGGER
'\ufffe' # 0x88 -> UNDEFINED
'\u2030' # 0x89 -> PER MILLE SIGN
'\ufffe' # 0x8A -> UNDEFINED
'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
'\ufffe' # 0x8C -> UNDEFINED
'\ufffe' # 0x8D -> UNDEFINED
'\ufffe' # 0x8E -> UNDEFINED
'\ufffe' # 0x8F -> UNDEFINED
'\ufffe' # 0x90 -> UNDEFINED
'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
'\u2022' # 0x95 -> BULLET
'\u2013' # 0x96 -> EN DASH
'\u2014' # 0x97 -> EM DASH
'\ufffe' # 0x98 -> UNDEFINED
'\u2122' # 0x99 -> TRADE MARK SIGN
'\ufffe' # 0x9A -> UNDEFINED
'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
'\ufffe' # 0x9C -> UNDEFINED
'\ufffe' # 0x9D -> UNDEFINED
'\ufffe' # 0x9E -> UNDEFINED
'\ufffe' # 0x9F -> UNDEFINED
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\u0385' # 0xA1 -> GREEK DIALYTIKA TONOS
'\u0386' # 0xA2 -> GREEK CAPITAL LETTER ALPHA WITH TONOS
'\xa3' # 0xA3 -> POUND SIGN
'\xa4' # 0xA4 -> CURRENCY SIGN
'\xa5' # 0xA5 -> YEN SIGN
'\xa6' # 0xA6 -> BROKEN BAR
'\xa7' # 0xA7 -> SECTION SIGN
'\xa8' # 0xA8 -> DIAERESIS
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\ufffe' # 0xAA -> UNDEFINED
'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xAC -> NOT SIGN
'\xad' # 0xAD -> SOFT HYPHEN
'\xae' # 0xAE -> REGISTERED SIGN
'\u2015' # 0xAF -> HORIZONTAL BAR
'\xb0' # 0xB0 -> DEGREE SIGN
'\xb1' # 0xB1 -> PLUS-MINUS SIGN
'\xb2' # 0xB2 -> SUPERSCRIPT TWO
'\xb3' # 0xB3 -> SUPERSCRIPT THREE
'\u0384' # 0xB4 -> GREEK TONOS
'\xb5' # 0xB5 -> MICRO SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xb7' # 0xB7 -> MIDDLE DOT
'\u0388' # 0xB8 -> GREEK CAPITAL LETTER EPSILON WITH TONOS
'\u0389' # 0xB9 -> GREEK CAPITAL LETTER ETA WITH TONOS
'\u038a' # 0xBA -> GREEK CAPITAL LETTER IOTA WITH TONOS
'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u038c' # 0xBC -> GREEK CAPITAL LETTER OMICRON WITH TONOS
'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
'\u038e' # 0xBE -> GREEK CAPITAL LETTER UPSILON WITH TONOS
'\u038f' # 0xBF -> GREEK CAPITAL LETTER OMEGA WITH TONOS
'\u0390' # 0xC0 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
'\u0391' # 0xC1 -> GREEK CAPITAL LETTER ALPHA
'\u0392' # 0xC2 -> GREEK CAPITAL LETTER BETA
'\u0393' # 0xC3 -> GREEK CAPITAL LETTER GAMMA
'\u0394' # 0xC4 -> GREEK CAPITAL LETTER DELTA
'\u0395' # 0xC5 -> GREEK CAPITAL LETTER EPSILON
'\u0396' # 0xC6 -> GREEK CAPITAL LETTER ZETA
'\u0397' # 0xC7 -> GREEK CAPITAL LETTER ETA
'\u0398' # 0xC8 -> GREEK CAPITAL LETTER THETA
'\u0399' # 0xC9 -> GREEK CAPITAL LETTER IOTA
'\u039a' # 0xCA -> GREEK CAPITAL LETTER KAPPA
'\u039b' # 0xCB -> GREEK CAPITAL LETTER LAMDA
'\u039c' # 0xCC -> GREEK CAPITAL LETTER MU
'\u039d' # 0xCD -> GREEK CAPITAL LETTER NU
'\u039e' # 0xCE -> GREEK CAPITAL LETTER XI
'\u039f' # 0xCF -> GREEK CAPITAL LETTER OMICRON
'\u03a0' # 0xD0 -> GREEK CAPITAL LETTER PI
'\u03a1' # 0xD1 -> GREEK CAPITAL LETTER RHO
'\ufffe' # 0xD2 -> UNDEFINED
'\u03a3' # 0xD3 -> GREEK CAPITAL LETTER SIGMA
'\u03a4' # 0xD4 -> GREEK CAPITAL LETTER TAU
'\u03a5' # 0xD5 -> GREEK CAPITAL LETTER UPSILON
'\u03a6' # 0xD6 -> GREEK CAPITAL LETTER PHI
'\u03a7' # 0xD7 -> GREEK CAPITAL LETTER CHI
'\u03a8' # 0xD8 -> GREEK CAPITAL LETTER PSI
'\u03a9' # 0xD9 -> GREEK CAPITAL LETTER OMEGA
'\u03aa' # 0xDA -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
'\u03ab' # 0xDB -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
'\u03ac' # 0xDC -> GREEK SMALL LETTER ALPHA WITH TONOS
'\u03ad' # 0xDD -> GREEK SMALL LETTER EPSILON WITH TONOS
'\u03ae' # 0xDE -> GREEK SMALL LETTER ETA WITH TONOS
'\u03af' # 0xDF -> GREEK SMALL LETTER IOTA WITH TONOS
'\u03b0' # 0xE0 -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
'\u03b1' # 0xE1 -> GREEK SMALL LETTER ALPHA
'\u03b2' # 0xE2 -> GREEK SMALL LETTER BETA
'\u03b3' # 0xE3 -> GREEK SMALL LETTER GAMMA
'\u03b4' # 0xE4 -> GREEK SMALL LETTER DELTA
'\u03b5' # 0xE5 -> GREEK SMALL LETTER EPSILON
'\u03b6' # 0xE6 -> GREEK SMALL LETTER ZETA
'\u03b7' # 0xE7 -> GREEK SMALL LETTER ETA
'\u03b8' # 0xE8 -> GREEK SMALL LETTER THETA
'\u03b9' # 0xE9 -> GREEK SMALL LETTER IOTA
'\u03ba' # 0xEA -> GREEK SMALL LETTER KAPPA
'\u03bb' # 0xEB -> GREEK SMALL LETTER LAMDA
'\u03bc' # 0xEC -> GREEK SMALL LETTER MU
'\u03bd' # 0xED -> GREEK SMALL LETTER NU
'\u03be' # 0xEE -> GREEK SMALL LETTER XI
'\u03bf' # 0xEF -> GREEK SMALL LETTER OMICRON
'\u03c0' # 0xF0 -> GREEK SMALL LETTER PI
'\u03c1' # 0xF1 -> GREEK SMALL LETTER RHO
'\u03c2' # 0xF2 -> GREEK SMALL LETTER FINAL SIGMA
'\u03c3' # 0xF3 -> GREEK SMALL LETTER SIGMA
'\u03c4' # 0xF4 -> GREEK SMALL LETTER TAU
'\u03c5' # 0xF5 -> GREEK SMALL LETTER UPSILON
'\u03c6' # 0xF6 -> GREEK SMALL LETTER PHI
'\u03c7' # 0xF7 -> GREEK SMALL LETTER CHI
'\u03c8' # 0xF8 -> GREEK SMALL LETTER PSI
'\u03c9' # 0xF9 -> GREEK SMALL LETTER OMEGA
'\u03ca' # 0xFA -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
'\u03cb' # 0xFB -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
'\u03cc' # 0xFC -> GREEK SMALL LETTER OMICRON WITH TONOS
'\u03cd' # 0xFD -> GREEK SMALL LETTER UPSILON WITH TONOS
'\u03ce' # 0xFE -> GREEK SMALL LETTER OMEGA WITH TONOS
'\ufffe' # 0xFF -> UNDEFINED
)
### Encoding table
# Inverse of decoding_table, built once at import time; positions mapped
# to U+FFFE above simply have no encoding entry.
encoding_table=codecs.charmap_build(decoding_table)
| apache-2.0 |
nwchandler/ansible | contrib/inventory/stacki.py | 78 | 6285 | #!/usr/bin/env python
# Copyright (c) 2016, Hugh Ma <hugh.ma@flextronics.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
# Stacki inventory script
# Configure stacki.yml with proper auth information and place in the following:
# - ../inventory/stacki.yml
# - /etc/stacki/stacki.yml
# - /etc/ansible/stacki.yml
# The stacki.yml file can contain entries for authentication information
# regarding the Stacki front-end node.
#
# use_hostnames uses hostname rather than interface ip as connection
#
#
"""
Example Usage:
List Stacki Nodes
$ ./stack.py --list
Example Configuration:
---
stacki:
auth:
stacki_user: admin
stacki_password: abc12345678910
stacki_endpoint: http://192.168.200.50/stack
use_hostnames: false
"""
import argparse
import os
import sys
import yaml
from distutils.version import StrictVersion
try:
import json
except:
import simplejson as json
try:
import requests
except:
sys.exit('requests package is required for this inventory script')
CONFIG_FILES = ['/etc/stacki/stacki.yml', '/etc/ansible/stacki.yml']
def stack_auth(params):
    """Log in to the Stacki front-end and return (session, auth dict).

    The returned dict carries USERNAME/PASSWORD plus the CSRFTOKEN and
    SESSIONID cookie values needed for authenticated API calls.
    """
    endpoint = params['stacki_endpoint']
    auth_creds = {'USERNAME': params['stacki_user'],
                  'PASSWORD': params['stacki_password']}

    client = requests.session()
    # The initial GET seeds the CSRF cookie required by the login POST.
    client.get(endpoint)
    init_csrf = client.cookies['csrftoken']

    header = {'csrftoken': init_csrf,
              'X-CSRFToken': init_csrf,
              'Content-type': 'application/x-www-form-urlencoded'}

    login_req = client.post(endpoint + "/login", data=auth_creds,
                            headers=header)
    auth_creds.update(CSRFTOKEN=login_req.cookies['csrftoken'],
                      SESSIONID=login_req.cookies['sessionid'])

    return client, auth_creds
def stack_build_header(auth_creds):
    """Build the JSON request headers from the auth dict returned by
    stack_auth()."""
    token = auth_creds['CSRFTOKEN']
    return {'csrftoken': token,
            'X-CSRFToken': token,
            'sessionid': auth_creds['SESSIONID'],
            'Content-type': 'application/json'}
def stack_host_list(endpoint, header, client):
    """Return the Stacki 'list host' command output as a parsed object."""
    payload = json.dumps({"cmd": "list host"})
    response = client.post(endpoint, data=payload, headers=header)
    # The API returns a JSON-encoded string; decode it to Python objects.
    return json.loads(response.json())
def stack_net_list(endpoint, header, client):
    """Return the Stacki 'list host interface' command output, parsed."""
    payload = json.dumps({"cmd": "list host interface"})
    response = client.post(endpoint, data=payload, headers=header)
    return json.loads(response.json())
def format_meta(hostdata, intfdata, config):
    """Assemble the ansible dynamic-inventory structure.

    hostdata: 'list host' rows; intfdata: 'list host interface' rows;
    config: parsed YAML config (reads 'use_hostnames').  Hosts are grouped
    into 'frontends'/'backends' by their appliance type; the label used in
    the group lists is either the hostname or the default interface IP.
    """
    use_hostnames = config['use_hostnames']
    hostvars = {}
    meta = {
        'all': {'hosts': []},
        'frontends': {'hosts': []},
        'backends': {'hosts': []},
        '_meta': {'hostvars': hostvars},
    }

    for host in hostdata:
        # 'environment' clashes with an ansible reserved name, so drop it.
        del host['environment']
        host['interfaces'] = []
        hostvars[host['host']] = host

    for intf in intfdata:
        owner = hostvars.get(intf['host'])
        if owner is None:
            continue
        owner['interfaces'].append(intf)
        # Only the default interface decides the connection address/grouping.
        if intf['default'] is not True:
            continue
        owner['ansible_host'] = intf['ip']
        label = intf['host'] if use_hostnames else intf['ip']
        meta['all']['hosts'].append(label)
        if owner['appliance'] != 'frontend':
            meta['backends']['hosts'].append(label)
        else:
            meta['frontends']['hosts'].append(label)

    return meta
def parse_args():
    """Parse CLI options; exactly one of --list / --host is required."""
    parser = argparse.ArgumentParser(description='Stacki Inventory Module')
    mode = parser.add_mutually_exclusive_group(required=True)
    mode.add_argument('--list', action='store_true',
                      help='List active hosts')
    mode.add_argument('--host',
                      help='List details about the specific host')
    return parser.parse_args()
def main():
    """Entry point: validate CLI args, load the YAML config, query the
    Stacki front-end and print the ansible inventory as JSON.

    Exits 1 on any error (missing config, auth/network failure), 0 on
    success.
    """
    # parse_args() enforces --list/--host; the values themselves are unused
    # because this script always emits the full inventory.
    args = parse_args()
    if StrictVersion(requests.__version__) < StrictVersion("2.4.3"):
        sys.exit('requests>=2.4.3 is required for this inventory script')
    try:
        # BUGFIX: copy the module-level list instead of appending to it so
        # repeated calls do not grow CONFIG_FILES.
        config_files = list(CONFIG_FILES)
        config_files.append(os.path.dirname(os.path.realpath(__file__)) + '/stacki.yml')
        config = None
        for cfg_file in config_files:
            if os.path.isfile(cfg_file):
                # BUGFIX: close the config file (the original leaked the
                # open handle).
                with open(cfg_file, 'r') as stream:
                    config = yaml.safe_load(stream)
                break
        if not config:
            sys.stderr.write("No config file found at {}\n".format(config_files))
            sys.exit(1)
        client, auth_creds = stack_auth(config['stacki']['auth'])
        header = stack_build_header(auth_creds)
        host_list = stack_host_list(config['stacki']['auth']['stacki_endpoint'], header, client)
        intf_list = stack_net_list(config['stacki']['auth']['stacki_endpoint'], header, client)
        final_meta = format_meta(host_list, intf_list, config)
        print(json.dumps(final_meta, indent=4))
    except Exception as e:
        # BUGFIX: Exception.message was removed in Python 3; str(e) works on
        # both Python 2 and 3.
        sys.stderr.write('%s\n' % str(e))
        sys.exit(1)
    sys.exit(0)
if __name__ == '__main__':
main()
| gpl-3.0 |
bourreauEric/or-tools | examples/python/data/nonogram_regular/nonogram_gondola.py | 74 | 2187 | # Copyright 2010 Hakan Kjellerstrand hakank@bonetmail.com
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Gondola
# From http://www.conceptispuzzles.com
#
# Puzzle grid height and the maximum number of clue entries per row.
# Each rule list is left-padded with zeros to a fixed length of 8; the
# non-zero entries are the usual nonogram run-length clues.
rows = 30
row_rule_len = 8
row_rules = [
    [0,0,0,0,0,0,5,6],
    [0,0,0,0,6,1,1,1],
    [0,0,0,0,0,3,11,3],
    [0,0,6,1,1,1,1,1],
    [0,7,1,1,1,2,1,3],
    [0,0,4,1,1,2,1,4],
    [0,7,1,1,1,2,3,1],
    [0,0,7,1,1,3,1,1],
    [0,0,4,1,1,1,1,9],
    [0,0,0,0,4,8,1,1],
    [0,0,0,4,1,4,1,3],
    [0,0,0,4,1,7,1,5],
    [4,1,1,2,1,4,1,1],
    [0,0,0,4,9,2,1,2],
    [0,0,4,1,3,1,2,1],
    [0,0,4,1,6,1,1,1],
    [0,0,0,0,4,8,3,1],
    [0,0,0,0,10,3,5,3],
    [0,0,4,1,2,3,5,2],
    [0,0,0,0,3,5,2,8],
    [0,0,0,2,6,3,1,1],
    [0,0,0,0,0,1,12,1],
    [0,0,0,0,0,20,1,1],
    [0,0,0,0,0,0,2,25],
    [0,0,0,0,0,2,3,20],
    [2,5,3,2,2,2,2,1],
    [0,0,0,0,0,1,2,22],
    [0,0,0,0,0,0,0,20],
    [0,0,0,0,0,0,3,18],
    [0,0,0,0,0,0,1,2]
    ]
# Puzzle grid width and the column clues, same zero-padded layout.
cols = 30
col_rule_len = 8
col_rules = [
    [0,0,2,2,2,1,2,1],
    [0,0,0,2,2,2,1,2],
    [0,0,0,2,2,2,3,1],
    [0,0,0,0,0,18,2,1],
    [0,0,0,0,0,23,1,1],
    [0,0,0,0,0,20,2,1],
    [0,0,0,0,0,0,16,4],
    [0,0,0,0,0,0,2,6],
    [0,0,0,0,0,1,7,8],
    [0,0,3,1,1,8,2,1],
    [0,0,0,1,1,7,9,1],
    [0,0,0,0,7,1,1,15],
    [0,0,1,1,3,1,12,3],
    [0,1,1,1,1,3,2,8],
    [0,1,1,1,2,3,4,8],
    [0,1,1,1,1,3,1,14],
    [0,0,0,0,7,6,8,3],
    [0,0,0,0,0,1,4,9],
    [0,0,0,1,2,1,1,7],
    [0,0,0,0,5,1,3,3],
    [0,0,0,0,0,2,1,6],
    [0,0,0,0,0,5,2,6],
    [0,0,0,0,1,4,2,3],
    [0,0,0,0,0,1,7,8],
    [0,0,0,0,7,4,5,6],
    [2,1,1,1,2,3,3,3],
    [0,0,0,7,2,1,1,6],
    [0,1,1,2,1,1,1,6],
    [0,2,1,1,1,3,2,3],
    [0,0,0,0,1,1,9,6]
    ]
| apache-2.0 |
akhilpm/Masters-Project | kpcaWithTreeFS/mnistKPCA.py | 1 | 3216 | '''
KPCA based feature engineering for MNIST handwritten digits classification
Author : Akhil P M
Kernel used : Arc-cosine Kernel
'''
import numpy as np
import matplotlib.pyplot as plt
import time
from sklearn import svm, datasets
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets.mldata import fetch_mldata
from sklearn.metrics import accuracy_score
from sklearn.cross_validation import train_test_split
def compute_J(N, theta):
    """Angular function J_n(theta) of the arc-cosine kernel for degrees
    n = 0..3; returns an all-zero array for any other degree."""
    sin_t = np.sin(theta)
    cos_t = np.cos(theta)
    rest = np.pi - theta
    if N == 0:
        return rest
    if N == 1:
        return sin_t + rest * cos_t
    if N == 2:
        return 3 * sin_t * cos_t + rest * (1 + 2 * cos_t ** 2)
    if N == 3:
        return (4 * sin_t ** 3 + 15 * sin_t * cos_t ** 2 +
                rest * (9 * sin_t ** 2 * cos_t + 15 * cos_t ** 3))
    return np.zeros(theta.shape)
def arc_cosine_vector(X, Y):
    """Multi-layer arc-cosine kernel matrix between the rows of X and Y.

    param holds the degree n of each stacked layer; the kernel is composed
    layer by layer, carrying the per-row self-kernels (k_xx, k_yy) forward.
    """
    param = np.array([1, 1, 1])
    no_of_layers = len(param)
    M = np.dot(X, Y.T)
    temp1 = np.diag(np.dot(X, X.T))  # k(x, x) for each row of X
    temp2 = np.diag(np.dot(Y, Y.T))  # k(y, y) for each row of Y

    # BUGFIX: xrange does not exist on Python 3; range is semantically
    # identical here and also works on Python 2.
    for i in range(no_of_layers):
        norm_matrix = np.outer(temp1, temp2)  # the matrix of k_xx and k_yy's
        # Clamp the cosine into [-1, 1] before arccos to avoid NaNs from
        # floating-point round-off.
        theta = np.arccos(np.maximum(np.minimum(M / np.sqrt(norm_matrix), 1.0), -1.0))
        n_l = param[i]
        M = np.multiply(np.power(norm_matrix, n_l / 2.0), compute_J(n_l, theta)) / np.pi

        if i < no_of_layers - 1:
            # Update the self-kernels for the next layer (theta = 0 there).
            zero1 = np.zeros(len(temp1))
            zero2 = np.zeros(len(temp2))
            temp1 = np.multiply(np.power(temp1, n_l), compute_J(n_l, zero1)) / np.pi
            temp2 = np.multiply(np.power(temp2, n_l), compute_J(n_l, zero2)) / np.pi
    return M
def arc_cosine(X, Y):
    """Arc-cosine kernel matrix computed in row chunks to bound memory."""
    chunk = 1000
    n_rows = X.shape[0]
    M = np.zeros((n_rows, Y.shape[0]))
    for start in range(0, n_rows, chunk):
        M[start:start + chunk] = arc_cosine_vector(X[start:start + chunk], Y)
    return M
def main():
    """Run the full pipeline: fetch MNIST, extract KPCA features with the
    arc-cosine kernel, classify with an SVM, and report accuracy.

    NOTE(review): this script is Python 2 (print statements below); the
    random train/test split is reseeded on every run.
    """
    # set the timer
    start = time.time()

    # load the data
    mnist = fetch_mldata('MNIST original')
    mnist.target = mnist.target.astype(np.int32)
    seed = np.random.randint(1,30000)
    rand = np.random.RandomState(seed)
    items = len(mnist.target)
    # random permutation-with-replacement of indices: first 30000 train,
    # remaining 40000 test
    indices = rand.randint(items, size = 70000)
    trindex = indices[0:30000]
    tsindex = indices[30000:]

    # scale down features to the range [0, 1]
    mnist.data = mnist.data/255.0
    mnist.data = mnist.data.astype(np.float32)

    trainX = mnist.data[trindex]
    testX = mnist.data[tsindex]
    trainY = mnist.target[trindex]
    testY = mnist.target[tsindex]

    # extract the features using KPCA; the kernel is precomputed so only a
    # 1000-sample subset is used to keep the kernel matrix tractable
    kpca = KernelPCA(kernel='precomputed')
    kpca_train = arc_cosine(trainX[0:1000], trainX[0:1000])

    # Fit the model from data in X
    kpca.fit(kpca_train)

    # cross-kernels of the full train/test sets against the KPCA subset
    kernel_train = arc_cosine(trainX, trainX[0:1000])
    kernel_test = arc_cosine(testX, trainX[0:1000])

    trainX_kpca = kpca.transform(kernel_train)
    testX_kpca = kpca.transform(kernel_test)
    print testX_kpca.shape

    # fit the svm model and compute accuracy measure
    clf = svm.SVC(kernel=arc_cosine)
    clf.fit(trainX_kpca, trainY)

    pred = clf.predict(testX_kpca)
    print accuracy_score(testY, pred)
    print('total : %d, correct : %d, incorrect : %d\n' %(len(pred), np.sum(pred == testY), np.sum(pred != testY)))
    print('Test Time : %f Minutes\n' %((time.time()-start)/60))
if __name__ == '__main__':
main() | mit |
gunan/tensorflow | tensorflow/python/util/tf_should_use.py | 3 | 8568 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Decorator that provides a warning if the wrapped object is never used."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import sys
import traceback
import six # pylint: disable=unused-import
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging
from tensorflow.python.util import tf_decorator
# pylint: enable=g-bad-import-order,g-import-not-at-top
class _TFShouldUseHelper(object):
  """Object stored in TFShouldUse-wrapped objects.

  When it is deleted it will emit a warning or error if its `sate` method
  has not been called by time of deletion, and TensorFlow is not executing
  eagerly or inside a tf.function (which use autodeps and resolve the
  main issues this wrapper warns about).
  """

  def __init__(self, type_, repr_, stack_frame, error_in_function,
               warn_in_eager):
    self._type = type_              # type of the wrapped value, for messages
    self._repr = repr_              # repr() of the wrapped value at creation
    self._stack_frame = stack_frame # caller frame, for the creation traceback
    self._error_in_function = error_in_function
    if context.executing_eagerly():
      # If warn_in_eager, sated == False.  Otherwise true.
      self._sated = not warn_in_eager
    elif ops.inside_function():
      if error_in_function:
        self._sated = False
        # Raise (rather than only warn) when the function graph finalizes
        # and the value was still never used.
        ops.add_exit_callback_to_default_func_graph(
            lambda: self._check_sated(raise_error=True))
      else:
        self._sated = True
    else:
      # TF1 graph building mode
      self._sated = False

  def sate(self):
    """Mark the value as used and drop the debug metadata."""
    self._sated = True
    self._type = None
    self._repr = None
    self._stack_frame = None
    # NOTE(review): _logging_module is never set or read anywhere else in
    # this module -- looks vestigial; confirm before removing.
    self._logging_module = None

  def _check_sated(self, raise_error):
    """Check if the object has been sated."""
    if self._sated:
      return
    creation_stack = ''.join(
        [line.rstrip()
         for line in traceback.format_stack(self._stack_frame, limit=5)])
    if raise_error:
      try:
        raise RuntimeError(
            'Object was never used (type {}): {}. If you want to mark it as '
            'used call its "mark_used()" method. It was originally created '
            'here:\n{}'.format(self._type, self._repr, creation_stack))
      finally:
        # Sate even on the error path so __del__ does not log a second time.
        self.sate()
    else:
      tf_logging.error(
          '==================================\n'
          'Object was never used (type {}):\n{}\nIf you want to mark it as '
          'used call its "mark_used()" method.\nIt was originally created '
          'here:\n{}\n'
          '=================================='
          .format(self._type, self._repr, creation_stack))

  def __del__(self):
    # Deletion without sate() means the value was never consumed; warn only
    # (raising from __del__ would be swallowed anyway).
    self._check_sated(raise_error=False)
def _new__init__(self, wrapped_value, tf_should_use_helper):
# pylint: disable=protected-access
self._tf_should_use_helper = tf_should_use_helper
self._tf_should_use_wrapped_value = wrapped_value
def _new__setattr__(self, key, value):
if key in ('_tf_should_use_helper', '_tf_should_use_wrapped_value'):
return object.__setattr__(self, key, value)
return setattr(
object.__getattribute__(self, '_tf_should_use_wrapped_value'),
key, value)
def _new__getattribute__(self, key):
if key not in ('_tf_should_use_helper', '_tf_should_use_wrapped_value'):
object.__getattribute__(self, '_tf_should_use_helper').sate()
if key in ('_tf_should_use_helper', 'mark_used', '__setatt__'):
return object.__getattribute__(self, key)
return getattr(
object.__getattribute__(self, '_tf_should_use_wrapped_value'), key)
def _new_mark_used(self, *args, **kwargs):
object.__getattribute__(self, '_tf_should_use_helper').sate()
try:
mu = object.__getattribute__(
object.__getattribute__(self, '_tf_should_use_wrapped_value'),
'mark_used')
return mu(*args, **kwargs)
except AttributeError:
pass
_WRAPPERS = {}
def _get_wrapper(x, tf_should_use_helper):
  """Create a wrapper for object x, whose class subclasses type(x).

  The wrapper will emit a warning if it is deleted without any of its
  properties being accessed or methods being called.

  Args:
    x: The instance to wrap.
    tf_should_use_helper: The object that tracks usage.

  Returns:
    An object wrapping `x`, of type `type(x)`.
  """
  type_x = type(x)
  # One wrapper class per wrapped type; reuse it if already built.
  memoized = _WRAPPERS.get(type_x, None)
  if memoized:
    return memoized(x, tf_should_use_helper)

  tx = copy.deepcopy(type_x)
  # Build a brand-new class object from the copy so the original type's
  # dict and special methods are never mutated.
  copy_tx = type(tx.__name__, tx.__bases__, dict(tx.__dict__))
  copy_tx.__init__ = _new__init__
  copy_tx.__getattribute__ = _new__getattribute__
  copy_tx.mark_used = _new_mark_used
  copy_tx.__setattr__ = _new__setattr__
  _WRAPPERS[type_x] = copy_tx

  return copy_tx(x, tf_should_use_helper)
def _add_should_use_warning(x, error_in_function=False, warn_in_eager=False):
  """Wraps object x so that if it is never used, a warning is logged.

  Args:
    x: Python object.
    error_in_function: Python bool.  If `True`, a `RuntimeError` is raised
      if the returned value is never used when created during `tf.function`
      tracing.
    warn_in_eager: Python bool.  If `True` raise warning if in Eager mode as
      well as graph mode.

  Returns:
    An instance of `TFShouldUseWarningWrapper` which subclasses `type(x)`
    and is a very shallow wrapper for `x` which logs access into `x`.
  """
  # Nothing to track for None or an empty list.
  if x is None or (isinstance(x, list) and not x):
    return x

  if context.executing_eagerly() and not warn_in_eager:
    return x

  if ops.inside_function() and not error_in_function:
    # We don't currently log warnings in tf.function calls, so just skip it.
    return x

  # Extract the current frame for later use by traceback printing.
  try:
    raise ValueError()
  except ValueError:
    # tb_frame is this frame; .f_back is the caller's frame, which is where
    # the unused value was created.
    stack_frame = sys.exc_info()[2].tb_frame.f_back

  tf_should_use_helper = _TFShouldUseHelper(
      type_=type(x),
      repr_=repr(x),
      stack_frame=stack_frame,
      error_in_function=error_in_function,
      warn_in_eager=warn_in_eager)

  return _get_wrapper(x, tf_should_use_helper)
def should_use_result(fn=None, warn_in_eager=False, error_in_function=False):
  """Function wrapper that ensures the function's output is used.

  If the output is not used, a `logging.error` is logged.  If
  `error_in_function` is set, then a `RuntimeError` will be raised at the
  end of function tracing if the output is not used by that point.

  An output is marked as used if any of its attributes are read, modified, or
  updated.  Examples when the output is a `Tensor` include:

  - Using it in any capacity (e.g. `y = t + 0`, `sess.run(t)`)
  - Accessing a property (e.g. getting `t.name` or `t.op`).
  - Calling `t.mark_used()`.

  Note, certain behaviors cannot be tracked - for these the object may not
  be marked as used.  Examples include:

  - `t != 0`.  In this case, comparison is done on types / ids.
  - `isinstance(t, tf.Tensor)`.  Similar to above.

  Args:
    fn: The function to wrap.
    warn_in_eager: Whether to create warnings in Eager as well.
    error_in_function: Whether to raise an error when creating a tf.function.

  Returns:
    The wrapped function.
  """
  def decorated(fn):
    def wrapped(*args, **kwargs):
      return _add_should_use_warning(fn(*args, **kwargs),
                                     warn_in_eager=warn_in_eager,
                                     error_in_function=error_in_function)
    return tf_decorator.make_decorator(
        target=fn,
        decorator_func=wrapped,
        decorator_name='should_use_result',
        decorator_doc=(
            (fn.__doc__ or '') +
            ('\n\n '
             '**NOTE** The output of this function should be used. If it is '
             'not, a warning will be logged or an error may be raised. '
             'To mark the output as used, call its .mark_used() method.')))

  if fn is not None:
    # Used as a bare decorator: @should_use_result
    return decorated(fn)
  else:
    # Used with arguments: @should_use_result(error_in_function=True)
    return decorated
| apache-2.0 |
zxjzxj9/FlaskBoard | web/lib/python2.7/site-packages/click/testing.py | 68 | 11004 | import os
import sys
import shutil
import tempfile
import contextlib
from ._compat import iteritems, PY2
# If someone wants to vendor click, we want to ensure the
# correct package is discovered. Ideally we could use a
# relative import here but unfortunately Python does not
# support that.
clickpkg = sys.modules[__name__.rsplit('.', 1)[0]]
if PY2:
from cStringIO import StringIO
else:
import io
from ._compat import _find_binary_reader
class EchoingStdin(object):
    """Wraps an input stream and mirrors everything read from it onto an
    output stream, so recorded output shows what was "typed"."""

    def __init__(self, input, output):
        self._input = input
        self._output = output

    def __getattr__(self, name):
        # Anything not overridden falls through to the real stream.
        return getattr(self._input, name)

    def _echo(self, data):
        self._output.write(data)
        return data

    def read(self, n=-1):
        data = self._input.read(n)
        return self._echo(data)

    def readline(self, n=-1):
        line = self._input.readline(n)
        return self._echo(line)

    def readlines(self):
        return [self._echo(line) for line in self._input.readlines()]

    def __iter__(self):
        return iter(self._echo(line) for line in self._input)

    def __repr__(self):
        return repr(self._input)
def make_input_stream(input, charset):
    """Coerce *input* (None, text, bytes, or a stream) into a binary stream
    suitable for use as stdin."""
    # Already a stream: hand back a binary reader for it.
    if hasattr(input, 'read'):
        if PY2:
            return input
        reader = _find_binary_reader(input)
        if reader is None:
            raise TypeError('Could not find binary reader for input stream.')
        return reader

    if input is None:
        data = b''
    elif isinstance(input, bytes):
        data = input
    else:
        data = input.encode(charset)
    return StringIO(data) if PY2 else io.BytesIO(data)
class Result(object):
    """Holds the captured result of an invoked CLI script."""

    def __init__(self, runner, output_bytes, exit_code, exception,
                 exc_info=None):
        #: The runner that created the result
        self.runner = runner
        #: The output as bytes.
        self.output_bytes = output_bytes
        #: The exit code as integer.
        self.exit_code = exit_code
        #: The exception that happened if one did.
        self.exception = exception
        #: The traceback
        self.exc_info = exc_info

    @property
    def output(self):
        """The output as unicode string."""
        decoded = self.output_bytes.decode(self.runner.charset, 'replace')
        return decoded.replace('\r\n', '\n')

    def __repr__(self):
        status = repr(self.exception) if self.exception else 'okay'
        return '<Result %s>' % (status,)
class CliRunner(object):
    """The CLI runner provides functionality to invoke a Click command line
    script for unittesting purposes in an isolated environment.  This only
    works in single-threaded systems without any concurrency as it changes the
    global interpreter state.

    :param charset: the character set for the input and output data.  This is
                    UTF-8 by default and should not be changed currently as
                    the reporting to Click only works in Python 2 properly.
    :param env: a dictionary with environment variables for overriding.
    :param echo_stdin: if this is set to `True`, then reading from stdin writes
                       to stdout.  This is useful for showing examples in
                       some circumstances.  Note that regular prompts
                       will automatically echo the input.
    """

    def __init__(self, charset=None, env=None, echo_stdin=False):
        if charset is None:
            charset = 'utf-8'
        self.charset = charset
        self.env = env or {}
        self.echo_stdin = echo_stdin

    def get_default_prog_name(self, cli):
        """Given a command object it will return the default program name
        for it.  The default is the `name` attribute or ``"root"`` if not
        set.
        """
        return cli.name or 'root'

    def make_env(self, overrides=None):
        """Returns the environment overrides for invoking a script."""
        rv = dict(self.env)
        if overrides:
            rv.update(overrides)
        return rv

    @contextlib.contextmanager
    def isolation(self, input=None, env=None, color=False):
        """A context manager that sets up the isolation for invoking of a
        command line tool.  This sets up stdin with the given input data
        and `os.environ` with the overrides from the given dictionary.
        This also rebinds some internals in Click to be mocked (like the
        prompt functionality).

        This is automatically done in the :meth:`invoke` method.

        .. versionadded:: 4.0
           The ``color`` parameter was added.

        :param input: the input stream to put into sys.stdin.
        :param env: the environment overrides as dictionary.
        :param color: whether the output should contain color codes.  The
                      application can still override this explicitly.
        """
        input = make_input_stream(input, self.charset)

        # Save all global state that will be patched so the finally block
        # can restore it.
        old_stdin = sys.stdin
        old_stdout = sys.stdout
        old_stderr = sys.stderr
        old_forced_width = clickpkg.formatting.FORCED_WIDTH
        clickpkg.formatting.FORCED_WIDTH = 80

        env = self.make_env(env)

        if PY2:
            sys.stdout = sys.stderr = bytes_output = StringIO()
            if self.echo_stdin:
                input = EchoingStdin(input, bytes_output)
        else:
            bytes_output = io.BytesIO()
            if self.echo_stdin:
                input = EchoingStdin(input, bytes_output)
            input = io.TextIOWrapper(input, encoding=self.charset)
            sys.stdout = sys.stderr = io.TextIOWrapper(
                bytes_output, encoding=self.charset)

        sys.stdin = input

        def visible_input(prompt=None):
            # Echo the prompt and the "typed" value into the captured output.
            sys.stdout.write(prompt or '')
            val = input.readline().rstrip('\r\n')
            sys.stdout.write(val + '\n')
            sys.stdout.flush()
            return val

        def hidden_input(prompt=None):
            # Hidden prompts echo the prompt but never the value.
            sys.stdout.write((prompt or '') + '\n')
            sys.stdout.flush()
            return input.readline().rstrip('\r\n')

        def _getchar(echo):
            char = sys.stdin.read(1)
            if echo:
                sys.stdout.write(char)
                sys.stdout.flush()
            return char

        default_color = color

        def should_strip_ansi(stream=None, color=None):
            if color is None:
                return not default_color
            return not color

        old_visible_prompt_func = clickpkg.termui.visible_prompt_func
        old_hidden_prompt_func = clickpkg.termui.hidden_prompt_func
        old__getchar_func = clickpkg.termui._getchar
        old_should_strip_ansi = clickpkg.utils.should_strip_ansi
        clickpkg.termui.visible_prompt_func = visible_input
        clickpkg.termui.hidden_prompt_func = hidden_input
        clickpkg.termui._getchar = _getchar
        clickpkg.utils.should_strip_ansi = should_strip_ansi

        old_env = {}
        try:
            for key, value in iteritems(env):
                # BUGFIX: remember the *previous* value of this key; the
                # original stored ``os.environ.get(value)``, so the old
                # environment was never captured and could not be restored.
                old_env[key] = os.environ.get(key)
                if value is None:
                    try:
                        del os.environ[key]
                    except Exception:
                        pass
                else:
                    os.environ[key] = value
            yield bytes_output
        finally:
            # Restore the environment and every patched global.
            for key, value in iteritems(old_env):
                if value is None:
                    try:
                        del os.environ[key]
                    except Exception:
                        pass
                else:
                    os.environ[key] = value
            sys.stdout = old_stdout
            sys.stderr = old_stderr
            sys.stdin = old_stdin
            clickpkg.termui.visible_prompt_func = old_visible_prompt_func
            clickpkg.termui.hidden_prompt_func = old_hidden_prompt_func
            clickpkg.termui._getchar = old__getchar_func
            clickpkg.utils.should_strip_ansi = old_should_strip_ansi
            clickpkg.formatting.FORCED_WIDTH = old_forced_width

    def invoke(self, cli, args=None, input=None, env=None,
               catch_exceptions=True, color=False, **extra):
        """Invokes a command in an isolated environment.  The arguments are
        forwarded directly to the command line script, the `extra` keyword
        arguments are passed to the :meth:`~clickpkg.Command.main` function of
        the command.

        This returns a :class:`Result` object.

        .. versionadded:: 3.0
           The ``catch_exceptions`` parameter was added.

        .. versionchanged:: 3.0
           The result object now has an `exc_info` attribute with the
           traceback if available.

        .. versionadded:: 4.0
           The ``color`` parameter was added.

        :param cli: the command to invoke
        :param args: the arguments to invoke
        :param input: the input data for `sys.stdin`.
        :param env: the environment overrides.
        :param catch_exceptions: Whether to catch any other exceptions than
                                 ``SystemExit``.
        :param extra: the keyword arguments to pass to :meth:`main`.
        :param color: whether the output should contain color codes.  The
                      application can still override this explicitly.
        """
        exc_info = None
        with self.isolation(input=input, env=env, color=color) as out:
            exception = None
            exit_code = 0

            try:
                cli.main(args=args or (),
                         prog_name=self.get_default_prog_name(cli), **extra)
            except SystemExit as e:
                # A zero SystemExit is a normal exit; anything else is a
                # failure we report through the Result.
                if e.code != 0:
                    exception = e

                exc_info = sys.exc_info()

                exit_code = e.code
                if not isinstance(exit_code, int):
                    # sys.exit("message") style: echo the message, exit 1.
                    sys.stdout.write(str(exit_code))
                    sys.stdout.write('\n')
                    exit_code = 1
            except Exception as e:
                if not catch_exceptions:
                    raise
                exception = e
                exit_code = -1
                exc_info = sys.exc_info()
            finally:
                sys.stdout.flush()
                output = out.getvalue()

        return Result(runner=self,
                      output_bytes=output,
                      exit_code=exit_code,
                      exception=exception,
                      exc_info=exc_info)

    @contextlib.contextmanager
    def isolated_filesystem(self):
        """A context manager that creates a temporary folder and changes
        the current working directory to it for isolated filesystem tests.
        """
        cwd = os.getcwd()
        t = tempfile.mkdtemp()
        os.chdir(t)
        try:
            yield t
        finally:
            os.chdir(cwd)
            try:
                shutil.rmtree(t)
            except (OSError, IOError):
                pass
| apache-2.0 |
dgarage/bc2 | contrib/linearize/linearize-data.py | 105 | 10081 | #!/usr/bin/env python3
#
# linearize-data.py: Construct a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function, division
import struct
import re
import os
import os.path
import sys
import hashlib
import datetime
import time
from collections import namedtuple
from binascii import hexlify, unhexlify
settings = {}
##### Switch endian-ness #####
def hex_switchEndian(s):
    """Switch the endianness of a hex string (in pairs of hex chars)."""
    pairs = [s[i:i + 2].encode() for i in range(0, len(s), 2)]
    pairs.reverse()
    return b''.join(pairs).decode()
def uint32(x):
    """Return the low 32 bits of *x* as an unsigned value."""
    mask = 0xffffffff
    return x & mask
def bytereverse(x):
    """Reverse the byte order of a 32-bit word."""
    swapped = ((x << 24) | ((x << 8) & 0x00ff0000) |
               ((x >> 8) & 0x0000ff00) | (x >> 24))
    # Truncate to 32 bits (the left shifts can overflow Python's int).
    return swapped & 0xffffffff
def bufreverse(in_buf):
    """Byte-swap each 32-bit word of *in_buf* (word order is preserved)."""
    return b''.join(
        struct.pack('@I', bytereverse(struct.unpack('@I', in_buf[i:i + 4])[0]))
        for i in range(0, len(in_buf), 4))
def wordreverse(in_buf):
    """Reverse the order of the 32-bit words in *in_buf*."""
    words = [in_buf[i:i + 4] for i in range(0, len(in_buf), 4)]
    return b''.join(reversed(words))
def calc_hdr_hash(blk_hdr):
    """Double-SHA256 of a serialized block header."""
    first = hashlib.sha256(blk_hdr).digest()
    return hashlib.sha256(first).digest()
def calc_hash_str(blk_hdr):
    """Hex string of the block-header hash in display (reversed) byte order."""
    raw = calc_hdr_hash(blk_hdr)
    # Byte-swap each word, then reverse the word order: together this
    # reverses the whole 32-byte digest, matching RPC hash formatting.
    reordered = wordreverse(bufreverse(raw))
    return hexlify(reordered).decode('utf-8')
def get_blk_dt(blk_hdr):
    """Extract (month-start datetime, raw nTime) from a block header.

    nTime lives at byte offset 68 of the 80-byte header, little-endian.
    """
    nTime = struct.unpack("<I", blk_hdr[68:68 + 4])[0]
    dt = datetime.datetime.fromtimestamp(nTime)
    month_start = datetime.datetime(dt.year, dt.month, 1)
    return (month_start, nTime)
# When getting the list of block hashes, undo any byte reversals.
def get_block_hashes(settings):
blkindex = []
f = open(settings['hashlist'], "r")
for line in f:
line = line.rstrip()
if settings['rev_hash_bytes'] == 'true':
line = hex_switchEndian(line)
blkindex.append(line)
print("Read " + str(len(blkindex)) + " hashes")
return blkindex
# The block map shouldn't give or receive byte-reversed hashes.
def mkblockmap(blkindex):
blkmap = {}
for height,hash in enumerate(blkindex):
blkmap[hash] = height
return blkmap
# Block header and extent on disk.
# fn: index of the source blk*.dat file; offset: byte offset of the payload
# within that file (just past the 80-byte header); inhdr: the 8-byte
# magic+length prefix; blkhdr: the 80-byte block header; size: payload size
# in bytes, excluding the header.
BlockExtent = namedtuple('BlockExtent', ['fn', 'offset', 'inhdr', 'blkhdr', 'size'])
class BlockDataCopier:
    """Copies blocks from the unordered input blk*.dat files into a linear,
    height-ordered sequence of output files (or one single output file)."""

    def __init__(self, settings, blkindex, blkmap):
        """settings: parsed config dict; blkindex: block hashes in height
        order; blkmap: hash -> height lookup for those hashes."""
        self.settings = settings
        self.blkindex = blkindex
        self.blkmap = blkmap

        self.inFn = 0
        self.inF = None
        self.outFn = 0
        self.outsz = 0
        self.outF = None
        self.outFname = None
        self.blkCountIn = 0
        self.blkCountOut = 0

        self.lastDate = datetime.datetime(2000, 1, 1)
        self.highTS = 1408893517 - 315360000
        self.timestampSplit = False
        self.fileOutput = True
        self.setFileTime = False
        self.maxOutSz = settings['max_out_sz']
        if 'output' in settings:
            self.fileOutput = False
        if settings['file_timestamp'] != 0:
            self.setFileTime = True
        if settings['split_timestamp'] != 0:
            self.timestampSplit = True
        # Extents and cache for out-of-order blocks
        self.blockExtents = {}
        self.outOfOrderData = {}
        self.outOfOrderSize = 0  # running total size for items in outOfOrderData

    def writeBlock(self, inhdr, blk_hdr, rawblock):
        """Append one block (magic+len prefix, header, payload) to the
        current output file, rotating on size or month boundaries."""
        blockSizeOnDisk = len(inhdr) + len(blk_hdr) + len(rawblock)
        if not self.fileOutput and ((self.outsz + blockSizeOnDisk) > self.maxOutSz):
            self.outF.close()
            if self.setFileTime:
                os.utime(self.outFname, (int(time.time()), self.highTS))
            self.outF = None
            self.outFname = None
            self.outFn = self.outFn + 1
            self.outsz = 0

        (blkDate, blkTS) = get_blk_dt(blk_hdr)
        if self.timestampSplit and (blkDate > self.lastDate):
            # Start a fresh output file at every new calendar month.
            print("New month " + blkDate.strftime("%Y-%m") + " @ " + self.hash_str)
            self.lastDate = blkDate
            if self.outF:
                self.outF.close()
                if self.setFileTime:
                    os.utime(self.outFname, (int(time.time()), self.highTS))
                self.outF = None
                self.outFname = None
                self.outFn = self.outFn + 1
                self.outsz = 0

        if not self.outF:
            if self.fileOutput:
                self.outFname = self.settings['output_file']
            else:
                self.outFname = os.path.join(self.settings['output'], "blk%05d.dat" % self.outFn)
            print("Output file " + self.outFname)
            self.outF = open(self.outFname, "wb")

        self.outF.write(inhdr)
        self.outF.write(blk_hdr)
        self.outF.write(rawblock)
        self.outsz = self.outsz + len(inhdr) + len(blk_hdr) + len(rawblock)

        self.blkCountOut = self.blkCountOut + 1
        if blkTS > self.highTS:
            self.highTS = blkTS

        if (self.blkCountOut % 1000) == 0:
            print('%i blocks scanned, %i blocks written (of %i, %.1f%% complete)' %
                  (self.blkCountIn, self.blkCountOut, len(self.blkindex), 100.0 * self.blkCountOut / len(self.blkindex)))

    def inFileName(self, fn):
        """Path of input file number *fn* under settings['input']."""
        return os.path.join(self.settings['input'], "blk%05d.dat" % fn)

    def fetchBlock(self, extent):
        '''Fetch block contents from disk given extents'''
        with open(self.inFileName(extent.fn), "rb") as f:
            f.seek(extent.offset)
            return f.read(extent.size)

    def copyOneBlock(self):
        '''Find the next block to be written in the input, and copy it to the output.'''
        extent = self.blockExtents.pop(self.blkCountOut)
        if self.blkCountOut in self.outOfOrderData:
            # If the data is cached, use it from memory and remove from the cache
            rawblock = self.outOfOrderData.pop(self.blkCountOut)
            self.outOfOrderSize -= len(rawblock)
        else:  # Otherwise look up data on disk
            rawblock = self.fetchBlock(extent)

        self.writeBlock(extent.inhdr, extent.blkhdr, rawblock)

    def run(self):
        """Main loop: scan the input files and emit blocks in height order."""
        while self.blkCountOut < len(self.blkindex):
            if not self.inF:
                fname = self.inFileName(self.inFn)
                print("Input file " + fname)
                try:
                    self.inF = open(fname, "rb")
                except IOError:
                    print("Premature end of block data")
                    return

            inhdr = self.inF.read(8)
            # BUGFIX: on Python 3 inhdr[0] is an int, so comparing it with
            # the str "\0" was always False; slice to get a bytes object.
            if (not inhdr or (inhdr[0:1] == b"\0")):
                self.inF.close()
                self.inF = None
                self.inFn = self.inFn + 1
                continue

            inMagic = inhdr[:4]
            if (inMagic != self.settings['netmagic']):
                print("Invalid magic: " + hexlify(inMagic).decode('utf-8'))
                return
            inLenLE = inhdr[4:]
            su = struct.unpack("<I", inLenLE)
            inLen = su[0] - 80  # length without header
            blk_hdr = self.inF.read(80)
            inExtent = BlockExtent(self.inFn, self.inF.tell(), inhdr, blk_hdr, inLen)

            self.hash_str = calc_hash_str(blk_hdr)
            # BUGFIX: use the instance's map/settings, not the module-level
            # globals the original accidentally relied on.
            if not self.hash_str in self.blkmap:
                # Because blocks can be written to files out-of-order as of 0.10, the script
                # may encounter blocks it doesn't know about. Treat as debug output.
                if self.settings['debug_output'] == 'true':
                    print("Skipping unknown block " + self.hash_str)
                self.inF.seek(inLen, os.SEEK_CUR)
                continue

            blkHeight = self.blkmap[self.hash_str]
            self.blkCountIn += 1

            if self.blkCountOut == blkHeight:
                # If in-order block, just copy
                rawblock = self.inF.read(inLen)
                self.writeBlock(inhdr, blk_hdr, rawblock)

                # See if we can catch up to prior out-of-order blocks
                while self.blkCountOut in self.blockExtents:
                    self.copyOneBlock()

            else:  # If out-of-order, skip over block data for now
                self.blockExtents[blkHeight] = inExtent
                if self.outOfOrderSize < self.settings['out_of_order_cache_sz']:
                    # If there is space in the cache, read the data
                    # Reading the data in file sequence instead of seeking and fetching it later is preferred,
                    # but we don't want to fill up memory
                    self.outOfOrderData[blkHeight] = self.inF.read(inLen)
                    self.outOfOrderSize += inLen
                else:  # If no space in cache, seek forward
                    self.inF.seek(inLen, os.SEEK_CUR)

        print("Done (%i blocks written)" % (self.blkCountOut))
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print("Usage: linearize-data.py CONFIG-FILE")
        sys.exit(1)

    # Parse the simple "key = value" config format; '#' lines are comments.
    # 'with' guarantees the handle is closed (the old code leaked it).
    with open(sys.argv[1]) as f:
        for line in f:
            # skip comment lines
            if re.search(r'^\s*#', line):
                continue
            # parse key=value lines
            m = re.search(r'^(\w+)\s*=\s*(\S.*)$', line)
            if m is None:
                continue
            settings[m.group(1)] = m.group(2)

    # Force hash byte format setting to be lowercase to make comparisons easier.
    # Also place upfront in case any settings need to know about it.
    if 'rev_hash_bytes' not in settings:
        settings['rev_hash_bytes'] = 'false'
    settings['rev_hash_bytes'] = settings['rev_hash_bytes'].lower()

    # Fill in defaults for anything the config file did not specify.
    settings.setdefault('netmagic', 'f9beb4d9')
    settings.setdefault('genesis', '000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f')
    settings.setdefault('input', 'input')
    settings.setdefault('hashlist', 'hashlist.txt')
    settings.setdefault('file_timestamp', 0)
    settings.setdefault('split_timestamp', 0)
    settings.setdefault('max_out_sz', 1000 * 1000 * 1000)
    settings.setdefault('out_of_order_cache_sz', 100 * 1000 * 1000)
    settings.setdefault('debug_output', 'false')

    # Normalize types: sizes/timestamps to int, network magic to raw bytes.
    settings['max_out_sz'] = int(settings['max_out_sz'])
    settings['split_timestamp'] = int(settings['split_timestamp'])
    settings['file_timestamp'] = int(settings['file_timestamp'])
    settings['netmagic'] = unhexlify(settings['netmagic'].encode('utf-8'))
    settings['out_of_order_cache_sz'] = int(settings['out_of_order_cache_sz'])
    settings['debug_output'] = settings['debug_output'].lower()

    if 'output_file' not in settings and 'output' not in settings:
        print("Missing output file / directory")
        sys.exit(1)

    blkindex = get_block_hashes(settings)
    blkmap = mkblockmap(blkindex)

    # Block hash map won't be byte-reversed. Neither should the genesis hash.
    if not settings['genesis'] in blkmap:
        print("Genesis block not found in hashlist")
    else:
        BlockDataCopier(settings, blkindex, blkmap).run()
| mit |
OuterDeepSpace/OuterDeepSpace | libs/server/ige/Scheduler.py | 3 | 6619 | #
# Copyright 2001 - 2007 Ludek Smid [http://www.ospace.net/]
#
# This file is part of Outer Space.
#
# Outer Space is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Outer Space is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Outer Space; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
import log
import datetime
class Scheduler(object):
    """Minimal cron-like scheduler for periodic game-server maintenance tasks.

    Tasks (cleanup, turn processing, backup, metaserver keep-alive) are
    registered from the game manager's configuration and fired from tick(),
    which the server is expected to call at least once per minute.
    """

    def __init__(self, gameMngr):
        self.gameMngr = gameMngr
        self.lastTick = self.getNow()  # minute of the last processed tick
        self.tasks = []
        self.loadConfig()

    def tick(self):
        """Run every task whose next scheduled event is the current minute."""
        t = self.getNow()
        if t == self.lastTick:
            # Called again within the same minute -- nothing new to do.
            return
        log.debug("SCHEDULER", "Processing tick", t)
        for task in self.tasks:
            # Compute the next event once (the old code evaluated it twice).
            nextEvent = task.getNextEvent(t)
            log.debug("SCHEDULER", task.name, nextEvent)
            if nextEvent == t:
                log.message("SCHEDULER", task.name, "Running action")
                try:
                    task.callable()
                except Exception:
                    # A failing task must not break the scheduler loop.
                    log.warning("Cannot execute scheduler action")
        self.lastTick = t

    def getNow(self):
        """Current time truncated to whole minutes (scheduler resolution)."""
        return datetime.datetime.now().replace(second = 0, microsecond = 0)

    def loadConfig(self):
        """Register all known task types from the game configuration."""
        # load cleanup
        self.loadTask(
            "CLEANUP", "scheduler:cleanup",
            self.gameMngr.clientMngr.cleanupSessions
        )
        # load turn processing
        self.loadTask(
            "TURN", "scheduler:turn",
            self.gameMngr.processTurnInternal
        )
        # load backup
        self.loadTask(
            "BACKUP", "scheduler:backup",
            self.gameMngr.backupInternal
        )
        # load metaserver keepalive
        self.loadTask(
            "METAKEEPALIVE", "scheduler:metaserver",
            self.gameMngr.keepAlive
        )

    def loadTask(self, name, sectionName, action):
        """Create a Task for every enabled config section sectionName[:0-9].

        :param name: symbolic task name used in log output
        :param sectionName: base config section name
        :param action: callable invoked when the task fires
        """
        for suffix in ("", ":0", ":1", ":2", ":3", ":4", ":5", ":6", ":7", ":8", ":9"):
            section = self.gameMngr.config.getSection(sectionName + suffix)
            if section.enabled != "yes":
                continue
            task = Task(
                minute = self.parse(section.minute),
                hour = self.parse(section.hour),
                weekday = self.parse(section.weekday),
                day = self.parse(section.day),
                month = self.parse(section.month),
                callable = action,
                name = name + suffix,
            )
            self.tasks.append(task)

    def parse(self, value):
        """Parse a comma separated list of ints; empty/None means 'every'."""
        if value:
            return [int(item) for item in value.split(",")]
        return []
class Task:
    """One cron-like scheduled action.

    Each field (minute, hour, weekday, day, month) is a list of accepted
    values; an empty list (or None) means "every". ``weekday`` and ``day``
    are mutually exclusive, exactly as in cron.

    Fixes over the original:
    - ``datetime.timedelta(months = 1)`` raised TypeError (timedelta has no
      'months' argument); month advancement is now done with ``replace``.
    - ``getNumOfDaysInMonth`` used the ``calendar`` module without importing
      it (NameError); it is now imported locally.
    - Mutable default arguments replaced by None sentinels (same semantics,
      since empty list and None are both falsy here).
    """

    def __init__(self, minute = None, hour = None, weekday = None, day = None, month = None, callable = None, name = None):
        assert not(weekday and day), "You cannot supply weekday and day at the same time"
        assert callable is not None, "You must supply a callable"
        self.doWeekday = bool(weekday)
        self.doDay = bool(day)
        # Empty/None constraint lists expand to the full valid range.
        self.minute = minute or range(0, 60)
        self.hour = hour or range(0, 24)
        self.weekday = weekday or range(0, 7)
        self.day = day or range(1, 32)
        self.month = month or range(1, 13)
        self.callable = callable
        self.name = name

    def __str__(self):
        compact = lambda s: ",".join(map(str, s))
        return "<%s %s %s-%s(%s) %s:%s>" % (
            self.__class__,
            self.name,
            compact(self.month),
            compact(self.day),
            compact(self.weekday),
            compact(self.hour),
            compact(self.minute)
        )

    def getNumOfDaysInMonth(self, month, year):
        """Return the number of days in *month* of *year* (leap-aware)."""
        import calendar  # local import: the module header does not import calendar
        days = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31][month]
        if calendar.isleap(year) and month == 2:
            days += 1
        return days

    def getNextEvent(self, now):
        """Return the first datetime >= *now* (minute resolution) matching
        this task's schedule. Recurses after each field rollover."""
        event = now.replace(second = 0, microsecond = 0)

        # --- month ---
        found = False
        for i in self.month:
            if event.month < i:
                # Jump forward to the first allowed month, resetting lower fields.
                event = event.replace(month = i, day = 1, hour = 0, minute = 0)
                found = True
                break
            elif event.month == i:
                found = True
                break
        if not found:
            # All allowed months are behind us: roll over to next year.
            event = event.replace(year = event.year + 1, month = self.month[0], day = 1, hour = 0, minute = 0)

        # --- weekday or day (mutually exclusive) ---
        if self.doWeekday:
            if event.weekday() not in self.weekday:
                # Try the next day from midnight.
                event = event.replace(hour = 0, minute = 0) + datetime.timedelta(days = 1)
                return self.getNextEvent(event)
        elif self.doDay:
            found = False
            for i in self.day:
                if event.day < i:
                    event = event.replace(day = i, hour = 0, minute = 0)
                    found = True
                    break
                elif event.day == i:
                    found = True
                    break
            if not found:
                # Advance to the first candidate day of the following month.
                # NOTE(review): like the original intent, this assumes
                # self.day[0] is a valid day of that month (e.g. not Feb 31).
                if event.month == 12:
                    event = event.replace(year = event.year + 1, month = 1, day = self.day[0], hour = 0, minute = 0)
                else:
                    event = event.replace(month = event.month + 1, day = self.day[0], hour = 0, minute = 0)
                return self.getNextEvent(event)

        # --- hour ---
        found = False
        for i in self.hour:
            if event.hour < i:
                event = event.replace(hour = i, minute = 0)
                found = True
                break
            if event.hour == i:
                found = True
                break
        if not found:
            # No allowed hour left today: restart from the first one tomorrow.
            event = event.replace(hour = self.hour[0], minute = 0) + datetime.timedelta(days = 1)
            return self.getNextEvent(event)

        # --- minute ---
        found = False
        for i in self.minute:
            if event.minute <= i:
                event = event.replace(minute = i)
                found = True
                break
        if not found:
            # No allowed minute left this hour: restart from the first one next hour.
            event = event.replace(minute = self.minute[0]) + datetime.timedelta(hours = 1)
            return self.getNextEvent(event)

        return event
if __name__ == "__main__":
t = Task(minute = [5, 30], day = [10], month = [1], callable = log)
print t.getNextEvent(datetime.datetime(2008, 11, 15, 23, 01))
t = Task(minute = [1], hour = [0], day = [10], month = [1], callable = log)
print t.getNextEvent(datetime.datetime(2008, 01, 10, 00, 02))
t = Task(minute = [0, 30], weekday = [1,5], callable = log)
print t.getNextEvent(datetime.datetime(2006, 11, 15, 22, 31))
| gpl-2.0 |
brianjmiller/TinCanPython | tincan/documents/activity_profile_document.py | 6 | 2456 | # Copyright 2014 Rustici Software
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tincan.documents import Document
from tincan.activity import Activity
class ActivityProfileDocument(Document):
    """Extends :class:`tincan.Document` with an Activity field, can be created from a dict, another
    :class:`tincan.Document`, or from kwargs.

    :param id: The id of this document
    :type id: unicode
    :param content_type: The content_type of the content of this document
    :type content_type: unicode
    :param content: The content of this document
    :type content: bytearray
    :param etag: The etag of this document
    :type etag: unicode
    :param timestamp: The timestamp of this document
    :type timestamp: :class:`datetime.datetime`
    :param activity: The activity object of this document
    :type activity: :class:`tincan.Activity`
    """

    _props_req = list(Document._props_req)

    _props_req.extend([
        'activity',
    ])

    _props = list(Document._props)

    _props.extend(_props_req)

    def __init__(self, *args, **kwargs):
        self._activity = None
        super(ActivityProfileDocument, self).__init__(*args, **kwargs)

    @property
    def activity(self):
        """The Document's activity object

        :setter: Tries to convert to :class:`tincan.Activity`
        :setter type: :class:`tincan.Activity`
        :rtype: :class:`tincan.Activity`
        """
        return self._activity

    @activity.setter
    def activity(self, value):
        if not isinstance(value, Activity) and value is not None:
            try:
                value = Activity(value)
            # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
            # are no longer swallowed; conversion failures still raise TypeError.
            except Exception:
                raise TypeError(
                    "Property 'activity' in 'tincan.%s' must be set with a type "
                    "that can be constructed into an tincan.Activity object." %
                    self.__class__.__name__
                )
        self._activity = value
| apache-2.0 |
circleback/sensu-community-plugins | plugins/openstack/neutron/check_neutron-api.py | 49 | 2449 | #!/usr/bin/env python
#
# Check OpenStack Neutron API Status
# ===
#
# Dependencies
# -----------
# - python-neutronclient and related libraries
#
# Performs API query to determine 'alive' status of the
# Neutron API.
#
# Author: Mike Dorman <mdorman@godaddy.com>
# Significantly based on neutron-agent-status.py by
# Brian Clark <brian.clark@cloudapt.com>
#
# Released under the same terms as Sensu (the MIT license);
# see LICENSE for details.
#
# #RED
import sys
import argparse
import logging
from neutronclient.neutron import client
# Nagios/Sensu-style exit codes.
STATE_OK = 0
STATE_WARNING = 1
STATE_CRITICAL = 2
STATE_UNKNOWN = 3

logging.basicConfig(level=logging.INFO)
#logging.basicConfig(level=logging.DEBUG)

# (flag, metavar, required, help) for every command line option.
_ARG_SPECS = (
    ('--auth-url', 'URL', True, 'Keystone URL'),
    ('--username', 'username', True, 'username for authentication'),
    ('--password', 'password', True, 'password for authentication'),
    ('--tenant', 'tenant', True, 'tenant name for authentication'),
    ('--region_name', 'region', False, 'Region to select for authentication'),
    ('--bypass', 'bybass', False, 'bypass the service catalog and use this URL for Nova API'),
)

parser = argparse.ArgumentParser(description='Check OpenStack Neutron API status')
for flag, meta, needed, text in _ARG_SPECS:
    parser.add_argument(flag, metavar=meta, type=str, required=needed, help=text)
args = parser.parse_args()

try:
    neutron = client.Client(
        '2.0',
        username=args.username,
        tenant_name=args.tenant,
        password=args.password,
        auth_url=args.auth_url,
        region_name=args.region_name,
        insecure=True,
        endpoint_url=args.bypass,
    )
    networks = neutron.list_networks()
except Exception as err:
    # Any failure talking to Keystone/Neutron is critical for this check.
    print(str(err))
    sys.exit(STATE_CRITICAL)

if networks:
    exit_state, state_string = STATE_OK, "OK"
else:
    exit_state, state_string = STATE_WARNING, "WARNING"

print("Neutron API status: {state_str}, {networks} network(s) found.".format(state_str=state_string, networks=len(networks)))
sys.exit(exit_state)
| mit |
RafaelTorrealba/odoo | addons/mrp_operations/mrp_operations.py | 193 | 27173 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields
from openerp.osv import osv
import time
from datetime import datetime
from openerp.tools.translate import _
#----------------------------------------------------------
# Work Centers
#----------------------------------------------------------
# capacity_hour : capacity per hour. default: 1.0.
# Eg: If 5 concurrent operations at one time: capacity = 5 (because 5 employees)
# unit_per_cycle : how many units are produced for one cycle
class stock_move(osv.osv):
    # Extend stock.move with the reverse side of move_dest_id: all moves whose
    # destination is this move. Used below by mrp_production._move_pass() to
    # walk the chain of dependent moves.
    _inherit = 'stock.move'
    _columns = {
        'move_dest_id_lines': fields.one2many('stock.move','move_dest_id', 'Children Moves')
    }
class mrp_production_workcenter_line(osv.osv):
    """Work order line: one operation of a manufacturing order on a work center.

    Adds a state machine (draft/pause/startworking/done/cancel), planned and
    actual dates, and keeps the parent mrp.production workflow in sync via
    modify_production_order_state().
    """

    def _get_date_end(self, cr, uid, ids, field_name, arg, context=None):
        """ Finds ending date.
        @return: Dictionary of values.
        """
        ops = self.browse(cr, uid, ids, context=context)
        # Resolve all (start, duration, calendar) triples in one batch call.
        date_and_hours_by_cal = [(op.date_planned, op.hour, op.workcenter_id.calendar_id.id) for op in ops if op.date_planned]
        intervals = self.pool.get('resource.calendar').interval_get_multi(cr, uid, date_and_hours_by_cal)
        res = {}
        for op in ops:
            res[op.id] = False
            if op.date_planned:
                i = intervals.get((op.date_planned, op.hour, op.workcenter_id.calendar_id.id))
                if i:
                    # End of the last working interval is the planned end date.
                    res[op.id] = i[-1][1].strftime('%Y-%m-%d %H:%M:%S')
                else:
                    # No calendar interval found: fall back to the start date.
                    res[op.id] = op.date_planned
        return res

    def onchange_production_id(self, cr, uid, ids, production_id, context=None):
        """Mirror product, quantity and UoM from the selected production order."""
        if not production_id:
            return {}
        production = self.pool.get('mrp.production').browse(cr, uid, production_id, context=None)
        result = {
            'product': production.product_id.id,
            'qty': production.product_qty,
            'uom': production.product_uom.id,
        }
        return {'value': result}

    _inherit = 'mrp.production.workcenter.line'
    _order = "sequence, date_planned"

    _columns = {
        'state': fields.selection([('draft','Draft'),('cancel','Cancelled'),('pause','Pending'),('startworking', 'In Progress'),('done','Finished')],'Status', readonly=True, copy=False,
                                  help="* When a work order is created it is set in 'Draft' status.\n" \
                                       "* When user sets work order in start mode that time it will be set in 'In Progress' status.\n" \
                                       "* When work order is in running mode, during that time if user wants to stop or to make changes in order then can set in 'Pending' status.\n" \
                                       "* When the user cancels the work order it will be set in 'Canceled' status.\n" \
                                       "* When order is completely processed that time it is set in 'Finished' status."),
        'date_planned': fields.datetime('Scheduled Date', select=True),
        'date_planned_end': fields.function(_get_date_end, string='End Date', type='datetime'),
        'date_start': fields.datetime('Start Date'),
        'date_finished': fields.datetime('End Date'),
        'delay': fields.float('Working Hours',help="The elapsed time between operation start and stop in this Work Center",readonly=True),
        'production_state':fields.related('production_id','state',
                type='selection',
                selection=[('draft','Draft'),('confirmed','Waiting Goods'),('ready','Ready to Produce'),('in_production','In Production'),('cancel','Canceled'),('done','Done')],
                string='Production Status', readonly=True),
        'product':fields.related('production_id','product_id',type='many2one',relation='product.product',string='Product',
            readonly=True),
        'qty':fields.related('production_id','product_qty',type='float',string='Qty',readonly=True, store=True),
        'uom':fields.related('production_id','product_uom',type='many2one',relation='product.uom',string='Unit of Measure',readonly=True),
    }

    _defaults = {
        'state': 'draft',
        'delay': 0.0,
        'production_state': 'draft'
    }

    def modify_production_order_state(self, cr, uid, ids, action):
        """ Modifies production order state if work order state is changed.
        @param action: Action to perform ('start' or anything else = finish check).
        @return: Nothing
        """
        prod_obj_pool = self.pool.get('mrp.production')
        # NOTE(review): only the first id is considered here.
        oper_obj = self.browse(cr, uid, ids)[0]
        prod_obj = oper_obj.production_id
        if action == 'start':
            # Push the production order into production, forcing it first if
            # it is still waiting for goods.
            if prod_obj.state =='confirmed':
                prod_obj_pool.force_production(cr, uid, [prod_obj.id])
                prod_obj_pool.signal_workflow(cr, uid, [prod_obj.id], 'button_produce')
            elif prod_obj.state =='ready':
                prod_obj_pool.signal_workflow(cr, uid, [prod_obj.id], 'button_produce')
            elif prod_obj.state =='in_production':
                return
            else:
                raise osv.except_osv(_('Error!'),_('Manufacturing order cannot be started in state "%s"!') % (prod_obj.state,))
        else:
            # Finish the production order only when no open work order remains.
            open_count = self.search_count(cr,uid,[('production_id','=',prod_obj.id), ('state', '!=', 'done')])
            flag = not bool(open_count)
            if flag:
                for production in prod_obj_pool.browse(cr, uid, [prod_obj.id], context= None):
                    if production.move_lines or production.move_created_ids:
                        # Consume remaining moves before closing the order.
                        prod_obj_pool.action_produce(cr,uid, production.id, production.product_qty, 'consume_produce', context = None)
                prod_obj_pool.signal_workflow(cr, uid, [oper_obj.production_id.id], 'button_produce_done')
        return

    def write(self, cr, uid, ids, vals, context=None, update=True):
        """Propagate a rescheduled work order start to its production order."""
        result = super(mrp_production_workcenter_line, self).write(cr, uid, ids, vals, context=context)
        prod_obj = self.pool.get('mrp.production')
        if vals.get('date_planned', False) and update:
            for prod in self.browse(cr, uid, ids, context=context):
                if prod.production_id.workcenter_lines:
                    # Production start = earliest of new date and first work order.
                    dstart = min(vals['date_planned'], prod.production_id.workcenter_lines[0]['date_planned'])
                    prod_obj.write(cr, uid, [prod.production_id.id], {'date_start':dstart}, context=context, mini=False)
        return result

    def action_draft(self, cr, uid, ids, context=None):
        """ Sets state to draft.
        @return: True
        """
        return self.write(cr, uid, ids, {'state': 'draft'}, context=context)

    def action_start_working(self, cr, uid, ids, context=None):
        """ Sets state to start working and writes starting date.
        @return: True
        """
        self.modify_production_order_state(cr, uid, ids, 'start')
        self.write(cr, uid, ids, {'state':'startworking', 'date_start': time.strftime('%Y-%m-%d %H:%M:%S')}, context=context)
        return True

    def action_done(self, cr, uid, ids, context=None):
        """ Sets state to done, writes finish date and calculates delay.
        @return: True
        """
        delay = 0.0
        date_now = time.strftime('%Y-%m-%d %H:%M:%S')
        # NOTE(review): only the first work order in ids is used for the delay.
        obj_line = self.browse(cr, uid, ids[0])

        date_start = datetime.strptime(obj_line.date_start,'%Y-%m-%d %H:%M:%S')
        date_finished = datetime.strptime(date_now,'%Y-%m-%d %H:%M:%S')
        # Elapsed wall-clock time expressed in hours.
        delay += (date_finished-date_start).days * 24
        delay += (date_finished-date_start).seconds / float(60*60)

        self.write(cr, uid, ids, {'state':'done', 'date_finished': date_now,'delay':delay}, context=context)
        self.modify_production_order_state(cr,uid,ids,'done')
        return True

    def action_cancel(self, cr, uid, ids, context=None):
        """ Sets state to cancel.
        @return: True
        """
        return self.write(cr, uid, ids, {'state':'cancel'}, context=context)

    def action_pause(self, cr, uid, ids, context=None):
        """ Sets state to pause.
        @return: True
        """
        return self.write(cr, uid, ids, {'state':'pause'}, context=context)

    def action_resume(self, cr, uid, ids, context=None):
        """ Sets state to startworking.
        @return: True
        """
        return self.write(cr, uid, ids, {'state':'startworking'}, context=context)
class mrp_production(osv.osv):
    """Extend manufacturing orders with work-order aware scheduling.

    Keeps work order (workcenter line) dates and the production order's own
    start/finish dates consistent, and cascades start/finish/cancel events
    to the attached work orders.
    """
    _inherit = 'mrp.production'
    _columns = {
        'allow_reorder': fields.boolean('Free Serialisation', help="Check this to be able to move independently all production orders, without moving dependent ones."),
    }

    def _production_date_end(self, cr, uid, ids, prop, unknow_none, context=None):
        """ Calculates planned end date of production order.
        @return: Dictionary of values
        """
        result = {}
        for prod in self.browse(cr, uid, ids, context=context):
            # End date = latest planned end among all work orders (at least
            # the order's own planned date).
            result[prod.id] = prod.date_planned
            for line in prod.workcenter_lines:
                result[prod.id] = max(line.date_planned_end, result[prod.id])
        return result

    def action_production_end(self, cr, uid, ids, context=None):
        """ Finishes work order if production order is done.
        @return: Super method
        """
        # NOTE(review): only the first production order in ids is handled here.
        obj = self.browse(cr, uid, ids, context=context)[0]
        workcenter_pool = self.pool.get('mrp.production.workcenter.line')
        for workcenter_line in obj.workcenter_lines:
            if workcenter_line.state == 'draft':
                # Never-started work orders are started then closed so the
                # whole order can finish.
                workcenter_line.signal_workflow('button_start_working')
            workcenter_line.signal_workflow('button_done')
        return super(mrp_production,self).action_production_end(cr, uid, ids, context=context)

    def action_in_production(self, cr, uid, ids, context=None):
        """ Changes state to In Production and writes starting date.
        @return: True
        """
        workcenter_pool = self.pool.get('mrp.production.workcenter.line')
        for prod in self.browse(cr, uid, ids):
            if prod.workcenter_lines:
                # Kick off the first work order along with the production.
                workcenter_pool.signal_workflow(cr, uid, [prod.workcenter_lines[0].id], 'button_start_working')
        return super(mrp_production,self).action_in_production(cr, uid, ids, context=context)

    def action_cancel(self, cr, uid, ids, context=None):
        """ Cancels work order if production order is canceled.
        @return: Super method
        """
        workcenter_pool = self.pool.get('mrp.production.workcenter.line')
        # NOTE(review): only the first production order in ids is handled here.
        obj = self.browse(cr, uid, ids,context=context)[0]
        workcenter_pool.signal_workflow(cr, uid, [record.id for record in obj.workcenter_lines], 'button_cancel')
        return super(mrp_production,self).action_cancel(cr,uid,ids,context=context)

    def _compute_planned_workcenter(self, cr, uid, ids, context=None, mini=False):
        """ Computes planned and finished dates for work order.

        Work orders are walked in sequence order; each one is scheduled after
        the previous one's end, using the work center's resource calendar.
        @param mini: when True, force-rewrite planned dates even if already later.
        @return: Calculated date
        """
        dt_end = datetime.now()
        if context is None:
            context = {}
        for po in self.browse(cr, uid, ids, context=context):
            dt_end = datetime.strptime(po.date_planned, '%Y-%m-%d %H:%M:%S')
            if not po.date_start:
                # update=False avoids re-entering the scheduling logic in write().
                self.write(cr, uid, [po.id], {
                    'date_start': po.date_planned
                }, context=context, update=False)
            old = None
            for wci in range(len(po.workcenter_lines)):
                wc = po.workcenter_lines[wci]
                if (old is None) or (wc.sequence>old):
                    # New sequence step: it starts at the current chain end.
                    dt = dt_end
                if context.get('__last_update'):
                    del context['__last_update']
                if (wc.date_planned < dt.strftime('%Y-%m-%d %H:%M:%S')) or mini:
                    self.pool.get('mrp.production.workcenter.line').write(cr, uid, [wc.id], {
                        'date_planned': dt.strftime('%Y-%m-%d %H:%M:%S')
                    }, context=context, update=False)
                    i = self.pool.get('resource.calendar').interval_get(
                        cr,
                        uid,
                        #passing False makes resource_resource._schedule_hours run 1000 iterations doing nothing
                        wc.workcenter_id.calendar_id and wc.workcenter_id.calendar_id.id or None,
                        dt,
                        wc.hour or 0.0
                    )
                    if i:
                        # Chain end advances to the end of the last interval.
                        dt_end = max(dt_end, i[-1][1])
                else:
                    dt_end = datetime.strptime(wc.date_planned_end, '%Y-%m-%d %H:%M:%S')

                old = wc.sequence or 0
            super(mrp_production, self).write(cr, uid, [po.id], {
                'date_finished': dt_end
            })
        return dt_end

    def _move_pass(self, cr, uid, ids, context=None):
        """ Calculates start date for stock moves finding interval from resource calendar.

        Walks dependent productions (via move_dest_id_lines) and pulls their
        start dates earlier when this order was moved into the past.
        @return: True
        """
        for po in self.browse(cr, uid, ids, context=context):
            if po.allow_reorder:
                # Free serialisation: dependent orders are left untouched.
                continue
            todo = list(po.move_lines)
            dt = datetime.strptime(po.date_start,'%Y-%m-%d %H:%M:%S')
            while todo:
                l = todo.pop(0)
                if l.state in ('done','cancel','draft'):
                    continue
                todo += l.move_dest_id_lines
                date_end = l.production_id.date_finished
                if date_end and datetime.strptime(date_end, '%Y-%m-%d %H:%M:%S') > dt:
                    if l.production_id.state not in ('done','cancel'):
                        for wc in l.production_id.workcenter_lines:
                            i = self.pool.get('resource.calendar').interval_min_get(
                                cr,
                                uid,
                                wc.workcenter_id.calendar_id.id or False,
                                dt, wc.hour or 0.0
                            )
                            dt = i[0][0]
                        if l.production_id.date_start > dt.strftime('%Y-%m-%d %H:%M:%S'):
                            # mini=True: only nudge planned dates, don't full-reschedule.
                            self.write(cr, uid, [l.production_id.id], {'date_start':dt.strftime('%Y-%m-%d %H:%M:%S')}, mini=True)
        return True

    def _move_futur(self, cr, uid, ids, context=None):
        """ Calculates start date for stock moves.

        Pushes dependent productions later when this order's end moved into
        the future.
        @return: True
        """
        for po in self.browse(cr, uid, ids, context=context):
            if po.allow_reorder:
                continue
            for line in po.move_created_ids:
                l = line
                # Follow the destination chain until a consumer production is found.
                while l.move_dest_id:
                    l = l.move_dest_id
                    if l.state in ('done','cancel','draft'):
                        break
                    if l.production_id.state in ('done','cancel'):
                        break
                    if l.production_id and (l.production_id.date_start < po.date_finished):
                        self.write(cr, uid, [l.production_id.id], {'date_start': po.date_finished})
                        break
        return True

    def write(self, cr, uid, ids, vals, context=None, update=True, mini=True):
        """Reschedule work orders and dependent productions on date changes."""
        direction = {}
        if vals.get('date_start', False):
            for po in self.browse(cr, uid, ids, context=context):
                # cmp (Python 2 builtin): 1 = moved to the past, -1 = to the future.
                direction[po.id] = cmp(po.date_start, vals.get('date_start', False))
        result = super(mrp_production, self).write(cr, uid, ids, vals, context=context)
        if (vals.get('workcenter_lines', False) or vals.get('date_start', False) or vals.get('date_planned', False)) and update:
            self._compute_planned_workcenter(cr, uid, ids, context=context, mini=mini)
        for d in direction:
            if direction[d] == 1:
                # the production order has been moved to the passed
                self._move_pass(cr, uid, [d], context=context)
                pass
            elif direction[d] == -1:
                self._move_futur(cr, uid, [d], context=context)
                # the production order has been moved to the future
                pass
        return result

    def action_compute(self, cr, uid, ids, properties=None, context=None):
        """ Computes bills of material of a product and planned date of work order.
        @param properties: List containing dictionaries of properties.
        @return: No. of products.
        """
        result = super(mrp_production, self).action_compute(cr, uid, ids, properties=properties, context=context)
        # Re-plan work orders now that BoM lines (and durations) changed.
        self._compute_planned_workcenter(cr, uid, ids, context=context)
        return result
class mrp_operations_operation_code(osv.osv):
    # Catalogue of operation codes; start_stop tells which work-order
    # transition (start/pause/resume/cancel/done) recording a code triggers.
    _name="mrp_operations.operation.code"
    _columns={
        'name': fields.char('Operation Name', required=True),
        'code': fields.char('Code', size=16, required=True),
        'start_stop': fields.selection([('start','Start'),('pause','Pause'),('resume','Resume'),('cancel','Cancelled'),('done','Done')], 'Status', required=True),
    }
class mrp_operations_operation(osv.osv):
_name="mrp_operations.operation"
def _order_date_search_production(self, cr, uid, ids, context=None):
""" Finds operations for a production order.
@return: List of ids
"""
operation_ids = self.pool.get('mrp_operations.operation').search(cr, uid, [('production_id','=',ids[0])], context=context)
return operation_ids
def _get_order_date(self, cr, uid, ids, field_name, arg, context=None):
""" Calculates planned date for an operation.
@return: Dictionary of values
"""
res={}
operation_obj = self.browse(cr, uid, ids, context=context)
for operation in operation_obj:
res[operation.id] = operation.production_id.date_planned
return res
def calc_delay(self, cr, uid, vals):
""" Calculates delay of work order.
@return: Delay
"""
code_lst = []
time_lst = []
code_ids = self.pool.get('mrp_operations.operation.code').search(cr, uid, [('id','=',vals['code_id'])])
code = self.pool.get('mrp_operations.operation.code').browse(cr, uid, code_ids)[0]
oper_ids = self.search(cr,uid,[('production_id','=',vals['production_id']),('workcenter_id','=',vals['workcenter_id'])])
oper_objs = self.browse(cr,uid,oper_ids)
for oper in oper_objs:
code_lst.append(oper.code_id.start_stop)
time_lst.append(oper.date_start)
code_lst.append(code.start_stop)
time_lst.append(vals['date_start'])
diff = 0
for i in range(0,len(code_lst)):
if code_lst[i] == 'pause' or code_lst[i] == 'done' or code_lst[i] == 'cancel':
if not i: continue
if code_lst[i-1] not in ('resume','start'):
continue
a = datetime.strptime(time_lst[i-1],'%Y-%m-%d %H:%M:%S')
b = datetime.strptime(time_lst[i],'%Y-%m-%d %H:%M:%S')
diff += (b-a).days * 24
diff += (b-a).seconds / float(60*60)
return diff
def check_operation(self, cr, uid, vals):
""" Finds which operation is called ie. start, pause, done, cancel.
@param vals: Dictionary of values.
@return: True or False
"""
code_ids=self.pool.get('mrp_operations.operation.code').search(cr,uid,[('id','=',vals['code_id'])])
code=self.pool.get('mrp_operations.operation.code').browse(cr,uid,code_ids)[0]
code_lst = []
oper_ids=self.search(cr,uid,[('production_id','=',vals['production_id']),('workcenter_id','=',vals['workcenter_id'])])
oper_objs=self.browse(cr,uid,oper_ids)
if not oper_objs:
if code.start_stop!='start':
raise osv.except_osv(_('Sorry!'),_('Operation is not started yet!'))
return False
else:
for oper in oper_objs:
code_lst.append(oper.code_id.start_stop)
if code.start_stop=='start':
if 'start' in code_lst:
raise osv.except_osv(_('Sorry!'),_('Operation has already started! You can either Pause/Finish/Cancel the operation.'))
return False
if code.start_stop=='pause':
if code_lst[len(code_lst)-1]!='resume' and code_lst[len(code_lst)-1]!='start':
raise osv.except_osv(_('Error!'),_('In order to Pause the operation, it must be in the Start or Resume state!'))
return False
if code.start_stop=='resume':
if code_lst[len(code_lst)-1]!='pause':
raise osv.except_osv(_('Error!'),_('In order to Resume the operation, it must be in the Pause state!'))
return False
if code.start_stop=='done':
if code_lst[len(code_lst)-1]!='start' and code_lst[len(code_lst)-1]!='resume':
raise osv.except_osv(_('Sorry!'),_('In order to Finish the operation, it must be in the Start or Resume state!'))
return False
if 'cancel' in code_lst:
raise osv.except_osv(_('Sorry!'),_('Operation is Already Cancelled!'))
return False
if code.start_stop=='cancel':
if not 'start' in code_lst :
raise osv.except_osv(_('Error!'),_('No operation to cancel.'))
return False
if 'done' in code_lst:
raise osv.except_osv(_('Error!'),_('Operation is already finished!'))
return False
return True
def write(self, cr, uid, ids, vals, context=None):
    """Validate the requested state change and, when a start date is
    supplied, recompute and store the cumulative delay on the matching
    work-order line.

    Note: only the first browsed record is used to fill in
    production_id/workcenter_id, mirroring the original behaviour.
    FIX: dropped the redundant self-assignment
    ``vals['date_start'] = vals['date_start']`` (a no-op).
    """
    oper_objs = self.browse(cr, uid, ids, context=context)[0]
    vals['production_id'] = oper_objs.production_id.id
    vals['workcenter_id'] = oper_objs.workcenter_id.id

    if 'code_id' in vals:
        # Raises osv.except_osv when the transition is illegal.
        self.check_operation(cr, uid, vals)

    if 'date_start' in vals:
        vals['code_id'] = oper_objs.code_id.id
        # Total hours between paired start/stop timestamps.
        delay = self.calc_delay(cr, uid, vals)
        wc_op_id = self.pool.get('mrp.production.workcenter.line').search(cr, uid, [('workcenter_id', '=', vals['workcenter_id']), ('production_id', '=', vals['production_id'])])
        self.pool.get('mrp.production.workcenter.line').write(cr, uid, wc_op_id, {'delay': delay})

    return super(mrp_operations_operation, self).write(cr, uid, ids, vals, context=context)
def create(self, cr, uid, vals, context=None):
    """Record a new operation event (start/pause/resume/done/cancel),
    drive the matching work-order line through its workflow, and store
    the recomputed delay and start/finish dates on it.
    """
    workcenter_pool = self.pool.get('mrp.production.workcenter.line')
    # Code record describing which action this event represents.
    code_ids=self.pool.get('mrp_operations.operation.code').search(cr,uid,[('id','=',vals['code_id'])])
    code=self.pool.get('mrp_operations.operation.code').browse(cr, uid, code_ids, context=context)[0]
    # Existing work-order line for this production/workcenter pair.
    wc_op_id=workcenter_pool.search(cr,uid,[('workcenter_id','=',vals['workcenter_id']),('production_id','=',vals['production_id'])])
    if code.start_stop in ('start','done','pause','cancel','resume'):
        if not wc_op_id:
            # Lazily create the work-order line on the first event.
            production_obj=self.pool.get('mrp.production').browse(cr, uid, vals['production_id'], context=context)
            wc_op_id.append(workcenter_pool.create(cr,uid,{'production_id':vals['production_id'],'name':production_obj.product_id.name,'workcenter_id':vals['workcenter_id']}))
        # Mirror the event onto the work-order line's state machine: call
        # the action method and fire the corresponding workflow signal.
        if code.start_stop=='start':
            workcenter_pool.action_start_working(cr,uid,wc_op_id)
            workcenter_pool.signal_workflow(cr, uid, [wc_op_id[0]], 'button_start_working')

        if code.start_stop=='done':
            workcenter_pool.action_done(cr,uid,wc_op_id)
            workcenter_pool.signal_workflow(cr, uid, [wc_op_id[0]], 'button_done')
            # Finishing also stamps the production order's finish date.
            self.pool.get('mrp.production').write(cr,uid,vals['production_id'],{'date_finished':datetime.now().strftime('%Y-%m-%d %H:%M:%S')})

        if code.start_stop=='pause':
            workcenter_pool.action_pause(cr,uid,wc_op_id)
            workcenter_pool.signal_workflow(cr, uid, [wc_op_id[0]], 'button_pause')

        if code.start_stop=='resume':
            workcenter_pool.action_resume(cr,uid,wc_op_id)
            workcenter_pool.signal_workflow(cr, uid, [wc_op_id[0]], 'button_resume')

        if code.start_stop=='cancel':
            workcenter_pool.action_cancel(cr,uid,wc_op_id)
            workcenter_pool.signal_workflow(cr, uid, [wc_op_id[0]], 'button_cancel')

    # Validate the transition; check_operation raises on illegal moves and
    # returns False only from unreachable branches.
    if not self.check_operation(cr, uid, vals):
        return
    delay=self.calc_delay(cr, uid, vals)
    line_vals = {}
    line_vals['delay'] = delay
    if vals.get('date_start',False):
        # 'done' events carry the finish timestamp, 'start' events the
        # start timestamp, in the same 'date_start' field.
        if code.start_stop == 'done':
            line_vals['date_finished'] = vals['date_start']
        elif code.start_stop == 'start':
            line_vals['date_start'] = vals['date_start']
    self.pool.get('mrp.production.workcenter.line').write(cr, uid, wc_op_id, line_vals, context=context)

    return super(mrp_operations_operation, self).create(cr, uid, vals, context=context)
def initialize_workflow_instance(self, cr, uid, context=None):
    """Create a workflow instance for every existing work-order line.

    Used as a one-shot migration hook so that lines created before the
    workflow existed can still be driven through it.
    """
    line_obj = self.pool.get('mrp.production.workcenter.line')
    all_line_ids = line_obj.search(cr, uid, [], context=context)
    line_obj.create_workflow(cr, uid, all_line_ids)
    return True
# ORM field declarations for the operation-event records.
_columns={
    # Production order and work center this event belongs to.
    'production_id':fields.many2one('mrp.production','Production',required=True),
    'workcenter_id':fields.many2one('mrp.workcenter','Work Center',required=True),
    # Start/stop code (start, pause, resume, done, cancel).
    'code_id':fields.many2one('mrp_operations.operation.code','Code',required=True),
    'date_start': fields.datetime('Start Date'),
    'date_finished': fields.datetime('End Date'),
    # Derived from the production order's planned date; recomputed when
    # mrp.production.date_planned changes (store trigger, priority 10).
    'order_date': fields.function(_get_order_date,string='Order Date',type='date',store={'mrp.production':(_order_date_search_production,['date_planned'], 10)}),
}
_defaults={
    # Default the start date to "now" at record-creation time.
    'date_start': lambda *a:datetime.now().strftime('%Y-%m-%d %H:%M:%S')
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
yl565/statsmodels | statsmodels/graphics/_regressionplots_doc.py | 31 | 3795 | _plot_added_variable_doc = """\
Create an added variable plot for a fitted regression model.
Parameters
----------
%(extra_params_doc)sfocus_exog : int or string
The column index of exog, or a variable name, indicating the
variable whose role in the regression is to be assessed.
resid_type : string
The type of residuals to use for the dependent variable. If
None, uses `resid_deviance` for GLM/GEE and `resid` otherwise.
use_glm_weights : bool
Only used if the model is a GLM or GEE. If True, the
residuals for the focus predictor are computed using WLS, with
the weights obtained from the IRLS calculations for fitting
the GLM. If False, unweighted regression is used.
fit_kwargs : dict, optional
Keyword arguments to be passed to fit when refitting the
model.
ax : Axes instance
Matplotlib Axes instance
Returns
-------
fig : matplotlib Figure
A matplotlib figure instance.
"""
# Docstring template: '%(extra_params_doc)s' is interpolated per-model
# before this text is attached to the plot_added_variable wrappers.
# Docstring template for partial-residual plots; '%(extra_params_doc)s'
# is interpolated per-model. FIX: corrected the "fited" typo.
_plot_partial_residuals_doc = """\
Create a partial residual, or 'component plus residual' plot for a
fitted regression model.
Parameters
----------
%(extra_params_doc)sfocus_exog : int or string
The column index of exog, or variable name, indicating the
variable whose role in the regression is to be assessed.
ax : Axes instance
Matplotlib Axes instance
Returns
-------
fig : matplotlib Figure
A matplotlib figure instance.
"""
# Docstring template for CERES residual plots; '%(extra_params_doc)s'
# is interpolated per-model before attachment.
_plot_ceres_residuals_doc = """\
Produces a CERES (Conditional Expectation Partial Residuals)
plot for a fitted regression model.
Parameters
----------
%(extra_params_doc)sfocus_exog : integer or string
The column index of results.model.exog, or the variable name,
indicating the variable whose role in the regression is to be
assessed.
frac : float
Lowess tuning parameter for the adjusted model used in the
CERES analysis. Not used if `cond_means` is provided.
cond_means : array-like, optional
If provided, the columns of this array span the space of the
conditional means E[exog | focus exog], where exog ranges over
some or all of the columns of exog (other than the focus exog).
ax : matplotlib.Axes instance, optional
The axes on which to draw the plot. If not provided, a new
axes instance is created.
Returns
-------
fig : matplotlib.Figure instance
The figure on which the partial residual plot is drawn.
References
----------
RD Cook and R Croos-Dabrera (1998). Partial residual plots in
generalized linear models. Journal of the American
Statistical Association, 93:442.
RD Cook (1993). Partial residual plots. Technometrics 35:4.
Notes
-----
`cond_means` is intended to capture the behavior of E[x1 |
x2], where x2 is the focus exog and x1 are all the other exog
variables. If all the conditional mean relationships are
linear, it is sufficient to set cond_means equal to the focus
exog. Alternatively, cond_means may consist of one or more
columns containing functional transformations of the focus
exog (e.g. x2^2) that are thought to capture E[x1 | x2].
If nothing is known or suspected about the form of E[x1 | x2],
set `cond_means` to None, and it will be estimated by
smoothing each non-focus exog against the focus exog. The
values of `frac` control these lowess smooths.
If cond_means contains only the focus exog, the results are
equivalent to a partial residual plot.
If the focus variable is believed to be independent of the
other exog variables, `cond_means` can be set to an (empty)
nx0 array.
"""
| bsd-3-clause |
pekeler/arangodb | 3rdParty/V8-4.3.61/build/gyp/test/actions/src/subdir1/counter.py | 261 | 1107 | #!/usr/bin/env python
# Copyright (c) 2010 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import time
# sys.argv[1] is the declared output file; the real counter is persisted
# alongside it in an undeclared '<output>.persist' file.
output = sys.argv[1]
persistoutput = "%s.persist" % sys.argv[1]

# Load the previous count; a missing/unreadable persist file means 0.
# FIX: use 'with' so handles are closed, and narrow the bare 'except:'
# (which also swallowed KeyboardInterrupt/SystemExit) to I/O errors.
count = 0
try:
    with open(persistoutput, 'r') as f:
        count = f.read()
except (IOError, OSError):
    pass
count = int(count) + 1

# Optional second argument caps the counter.
if len(sys.argv) > 2:
    max_count = int(sys.argv[2])
    if count > max_count:
        count = max_count

oldcount = 0
try:
    with open(output, 'r') as f:
        oldcount = f.read()
except (IOError, OSError):
    pass

# Save the count in a file that is undeclared, and thus hidden, to gyp. We need
# to do this because, prior to running commands, some build systems deletes
# any declared outputs, so we would lose our count if we just wrote to the
# given output file.
with open(persistoutput, 'w') as f:
    f.write('%d' % (count))

# Only write the given output file if the count has changed.
if int(oldcount) != count:
    with open(output, 'w') as f:
        f.write('%d' % (count))

# Sleep so the next run changes the file time sufficiently to make the build
# detect the file as changed.
time.sleep(1)

sys.exit(0)
| apache-2.0 |
giggsey/SickRage | sickbeard/providers/frenchtorrentdb.py | 7 | 10668 | # Authors: Yannick Croissant <yannick.croissant@gmail.com>
# adaur <adaur.underground@gmail.com>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import re
import datetime
from requests.auth import AuthBase
import sickbeard
import generic
from sickbeard.bs4_parser import BS4Parser
from sickbeard.common import Quality
from sickbeard import logger
from sickbeard import tvcache
from sickbeard import show_name_helpers
from sickbeard import db
from sickbeard import helpers
from sickbeard import classes
from sickbeard.helpers import sanitizeSceneName
class FrenchTorrentDBProvider(generic.TorrentProvider):
    """Torrent provider for frenchtorrentdb.com (FTDB).

    Logs in by solving the site's obfuscated JavaScript challenge, then
    scrapes the advanced-search result grid for download links.
    """

    def __init__(self):
        generic.TorrentProvider.__init__(self, "FrenchTorrentDB")

        self.supportsBacklog = True
        self.public = False

        self.urls = {
            'base_url': 'http://www.frenchtorrentdb.com',
        }

        self.url = self.urls['base_url']

        # Pre-selected TV category ids for the site's advanced search.
        self.search_params = {
            "adv_cat%5Bs%5D%5B1%5D": 95,
            "adv_cat%5Bs%5D%5B2%5D": 190,
            "adv_cat%5Bs%5D%5B3%5D": 101,
            "adv_cat%5Bs%5D%5B4%5D": 191,
            "adv_cat%5Bs%5D%5B5%5D": 197,
            "adv_cat%5Bs%5D%5B7%5D": 199,
            "adv_cat%5Bs%5D%5B8%5D": 201,
            "adv_cat%5Bs%5D%5B9%5D": 128,
            "section": "TORRENTS",
            "exact": 1,
            "submit": "GO"
        }

        self.enabled = False
        self.username = None
        self.password = None
        self.ratio = None
        self.minseed = None
        self.minleech = None

    def isEnabled(self):
        return self.enabled

    def imageName(self):
        return 'frenchtorrentdb.png'

    def getQuality(self, item, anime=False):
        # item is a (title, url) pair; quality is parsed from the title.
        quality = Quality.sceneQuality(item[0], anime)
        return quality

    def _doLogin(self):
        """Fetch the JS login challenge, solve it and post the credentials.

        NOTE(review): always returns True, even if the login POST failed;
        kept as-is because callers only gate on a falsy result.
        """
        params = {
            "section": "LOGIN",
            "challenge": 1
        }

        data = self.getURL(self.url, params=params, json=True)
        post_data = {
            'username' : self.username,
            'password' : self.password,
            'secure_login': self._getSecureLogin(data['challenge']),
            'hash' : data['hash']
        }

        params.pop('challenge')
        params['ajax'] = 1

        self.getURL(self.url, params=params, post_data=post_data)
        return True

    def _getSecureLogin(self, challenges):
        """Decode the obfuscated JS challenge strings into the token."""

        def fromCharCode(*args):
            return ''.join(map(unichr, args))

        def decodeString(p, a, c, k, e, d):
            # Python port of the JS 'packer' unpacker; the unused e/d
            # parameters mirror the original JS signature, and the inner
            # e() deliberately shadows the parameter (as in the JS).
            a = int(a)
            c = int(c)

            def e(c):
                if c < a:
                    f = ''
                else:
                    f = e(c / a)
                return f + fromCharCode(c % a + 161)

            while c:
                c -= 1
                if k[c]:
                    regex = re.compile(e(c))
                    p = re.sub(regex, k[c], p)
            return p

        def decodeChallenge(challenge):
            regexGetArgs = re.compile('\'([^\']+)\',([0-9]+),([0-9]+),\'([^\']+)\'')
            regexIsEncoded = re.compile('decodeURIComponent')
            regexUnquote = re.compile('\'')
            if challenge == 'a':
                return '05f'
            # FIX: compare to None with 'is', not '=='.
            if re.match(regexIsEncoded, challenge) is None:
                # Plain quoted fragment: just strip the quotes.
                return re.sub(regexUnquote, '', challenge)
            args = re.findall(regexGetArgs, challenge)
            decoded = decodeString(
                args[0][0], args[0][1],
                args[0][2], args[0][3].split('|'),
                0, {})
            return decoded

        secureLogin = ''
        for challenge in challenges:
            secureLogin += decodeChallenge(challenge)
        return secureLogin

    def _get_episode_search_strings(self, ep_obj, add_string=''):
        """Build per-episode search strings for every known show name."""
        search_string = {'Episode': []}

        if not ep_obj:
            return []

        if self.show.air_by_date:
            for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
                ep_string = sanitizeSceneName(show_name) + '.' + \
                            str(ep_obj.airdate).replace('-', '|')
                search_string['Episode'].append(ep_string)
        elif self.show.sports:
            for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
                ep_string = sanitizeSceneName(show_name) + '.' + \
                            str(ep_obj.airdate).replace('-', '|') + '|' + \
                            ep_obj.airdate.strftime('%b')
                search_string['Episode'].append(ep_string)
        elif self.show.anime:
            for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
                ep_string = sanitizeSceneName(show_name) + '.' + \
                            "%i" % int(ep_obj.scene_absolute_number)
                search_string['Episode'].append(ep_string)
        else:
            for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
                ep_string = sanitizeSceneName(show_name) + '.' + \
                            sickbeard.config.naming_ep_type[2] % {
                                'seasonnumber': ep_obj.scene_season,
                                'episodenumber': ep_obj.scene_episode
                            } + ' %s' % add_string
                search_string['Episode'].append(re.sub(r'\s+', '.', ep_string))

        return [search_string]

    def _doSearch(self, search_strings, search_mode='eponly', epcount=0, age=0, epObj=None):
        """Run the given searches against FTDB and return (title, url) pairs."""
        results = []
        items = {'Season': [], 'Episode': [], 'RSS': []}

        # check for auth
        if not self._doLogin():
            return results

        for mode in search_strings.keys():
            for search_string in search_strings[mode]:
                logger.log(u"Search string: " + search_string, logger.DEBUG)
                self.search_params['name'] = search_string
                r = self.getURL(self.url, params=self.search_params)
                with BS4Parser(r, features=["html5lib", "permissive"]) as html:
                    resultsTable = html.find("div", {"class": "DataGrid"})
                    logger.log(u"Page opened", logger.DEBUG)
                    if resultsTable:
                        logger.log(u"We have results ", logger.DEBUG)
                        rows = resultsTable.findAll("ul")
                        for row in rows:
                            link = row.find("a", title=True)
                            title = link['title']
                            # Follow the per-torrent page to grab the
                            # direct ("autoget") download link.
                            autogetURL = self.url +'/' + (row.find("li", {"class": "torrents_name"}).find('a')['href'][1:]).replace('#FTD_MENU' ,'&menu=4')
                            r = self.getURL(autogetURL)
                            with BS4Parser(r, features=["html5lib", "permissive"]) as html:
                                downloadURL = html.find("div", {"class" : "autoget"}).find('a')['href']
                                item = title, downloadURL
                                logger.log(u"Download URL : " + downloadURL, logger.DEBUG)
                                items[mode].append(item)
            results += items[mode]

        return results

    def _get_title_and_url(self, item):
        """Normalize a result pair: dotted title, unescaped URL."""
        title, url = item

        if title:
            title = u'' + title
            title = title.replace(' ', '.')

        if url:
            url = str(url).replace('&amp;', '&')

        return (title, url)

    def findPropers(self, search_date=None):
        """Search for PROPER/REPACK releases aired since search_date.

        FIX: the default used to be ``datetime.datetime.today()``, which is
        evaluated once at import time, so a long-running process searched
        from an ever-more-stale date; resolve it at call time instead.
        """
        if search_date is None:
            search_date = datetime.datetime.today()

        results = []

        myDB = db.DBConnection()
        sqlResults = myDB.select(
            'SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.airdate FROM tv_episodes AS e' +
            ' INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id)' +
            ' WHERE e.airdate >= ' + str(search_date.toordinal()) +
            ' AND (e.status IN (' + ','.join([str(x) for x in Quality.DOWNLOADED]) + ')' +
            ' OR (e.status IN (' + ','.join([str(x) for x in Quality.SNATCHED]) + ')))'
        )

        if not sqlResults:
            return results

        for sqlshow in sqlResults:
            self.show = helpers.findCertainShow(sickbeard.showList, int(sqlshow["showid"]))
            if self.show:
                curEp = self.show.getEpisode(int(sqlshow["season"]), int(sqlshow["episode"]))
                search_params = self._get_episode_search_strings(curEp, add_string='PROPER|REPACK')

                for item in self._doSearch(search_params[0]):
                    title, url = self._get_title_and_url(item)
                    results.append(classes.Proper(title, url, datetime.datetime.today(), self.show))

        return results

    def seedRatio(self):
        return self.ratio

    def _get_season_search_strings(self, ep_obj):
        """Build per-season search strings for every known show name."""
        search_string = {'Season': []}

        for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
            if ep_obj.show.air_by_date or ep_obj.show.sports:
                ep_string = show_name + '.' + str(ep_obj.airdate).split('-')[0]
            elif ep_obj.show.anime:
                ep_string = show_name + '.' + "%d" % ep_obj.scene_absolute_number
            else:
                ep_string = show_name + '.S%02d' % int(ep_obj.scene_season)  # 1) showName.SXX

            search_string['Season'].append(ep_string)

        return [search_string]
class FrenchTorrentDBAuth(AuthBase):
    """requests auth hook that replays a pre-computed Authorization token."""

    def __init__(self, token):
        # Captured once; injected into every outgoing request.
        self.token = token

    def __call__(self, r):
        # requests invokes the auth object with the prepared request:
        # mutate its headers in place and hand it back.
        r.headers['Authorization'] = self.token
        return r
class FrenchTorrentDBCache(tvcache.TVCache):
    """Feed-style cache backed by a blank FTDB search."""

    def __init__(self, provider):
        tvcache.TVCache.__init__(self, provider)

        # Poll FTDB at most once every 10 minutes.
        self.minTime = 10

    def _getRSSData(self):
        """Run an empty 'RSS' search and wrap the hits like feed entries."""
        blank_query = {'RSS': ['']}
        return {'entries': self.provider._doSearch(blank_query)}
# Module-level singleton picked up by SickRage's provider discovery.
provider = FrenchTorrentDBProvider()
| gpl-3.0 |
yury-s/v8-inspector | Source/chrome/testing/gtest/test/gtest_xml_test_utils.py | 1815 | 8876 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for gtest_xml_output"""
__author__ = 'eefacm@gmail.com (Sean Mcafee)'
import re
from xml.dom import minidom, Node
import gtest_test_utils
# Command-line flag Google Test uses to select XML output, and the
# default file name it writes when no explicit path is given.
GTEST_OUTPUT_FLAG = '--gtest_output'
GTEST_DEFAULT_OUTPUT_FILE = 'test_detail.xml'
class GTestXMLTestCase(gtest_test_utils.TestCase):
  """
  Base class for tests of Google Test's XML output functionality.
  """

  def AssertEquivalentNodes(self, expected_node, actual_node):
    """
    Asserts that actual_node (a DOM node object) is equivalent to
    expected_node (another DOM node object), in that either both of
    them are CDATA nodes and have the same value, or both are DOM
    elements and actual_node meets all of the following conditions:

    * It has the same tag name as expected_node.
    * It has the same set of attributes as expected_node, each with
      the same value as the corresponding attribute of expected_node.
      Exceptions are any attribute named "time", which needs only be
      convertible to a floating-point number and any attribute named
      "type_param" which only has to be non-empty.
    * It has an equivalent set of child nodes (including elements and
      CDATA sections) as expected_node.  Note that we ignore the
      order of the children as they are not guaranteed to be in any
      particular order.
    """

    # CDATA leaves: compare values directly, no recursion needed.
    if expected_node.nodeType == Node.CDATA_SECTION_NODE:
      self.assertEquals(Node.CDATA_SECTION_NODE, actual_node.nodeType)
      self.assertEquals(expected_node.nodeValue, actual_node.nodeValue)
      return

    self.assertEquals(Node.ELEMENT_NODE, actual_node.nodeType)
    self.assertEquals(Node.ELEMENT_NODE, expected_node.nodeType)
    self.assertEquals(expected_node.tagName, actual_node.tagName)

    # Compare attribute sets: same count, same names, same values.
    expected_attributes = expected_node.attributes
    actual_attributes = actual_node .attributes
    self.assertEquals(
        expected_attributes.length, actual_attributes.length,
        'attribute numbers differ in element %s:\nExpected: %r\nActual: %r' % (
            actual_node.tagName, expected_attributes.keys(),
            actual_attributes.keys()))
    for i in range(expected_attributes.length):
      expected_attr = expected_attributes.item(i)
      actual_attr = actual_attributes.get(expected_attr.name)
      self.assert_(
          actual_attr is not None,
          'expected attribute %s not found in element %s' %
          (expected_attr.name, actual_node.tagName))
      self.assertEquals(
          expected_attr.value, actual_attr.value,
          ' values of attribute %s in element %s differ: %s vs %s' %
          (expected_attr.name, actual_node.tagName,
           expected_attr.value, actual_attr.value))

    # Recurse into children, matched by identifying attribute rather
    # than by position (child order is not guaranteed).
    expected_children = self._GetChildren(expected_node)
    actual_children = self._GetChildren(actual_node)
    self.assertEquals(
        len(expected_children), len(actual_children),
        'number of child elements differ in element ' + actual_node.tagName)
    for child_id, child in expected_children.iteritems():
      self.assert_(child_id in actual_children,
                   '<%s> is not in <%s> (in element %s)' %
                   (child_id, actual_children, actual_node.tagName))
      self.AssertEquivalentNodes(child, actual_children[child_id])

  # Maps each recognized element tag to the attribute that uniquely
  # identifies a child of that kind within its parent.
  identifying_attribute = {
      'testsuites': 'name',
      'testsuite': 'name',
      'testcase': 'name',
      'failure': 'message',
      }

  def _GetChildren(self, element):
    """
    Fetches all of the child nodes of element, a DOM Element object.
    Returns them as the values of a dictionary keyed by the IDs of the
    children.  For <testsuites>, <testsuite> and <testcase> elements, the ID
    is the value of their "name" attribute; for <failure> elements, it is
    the value of the "message" attribute; CDATA sections and non-whitespace
    text nodes are concatenated into a single CDATA section with ID
    "detail".  An exception is raised if any element other than the above
    four is encountered, if two child elements with the same identifying
    attributes are encountered, or if any other type of node is encountered.
    """

    children = {}
    for child in element.childNodes:
      if child.nodeType == Node.ELEMENT_NODE:
        self.assert_(child.tagName in self.identifying_attribute,
                     'Encountered unknown element <%s>' % child.tagName)
        childID = child.getAttribute(self.identifying_attribute[child.tagName])
        self.assert_(childID not in children)
        children[childID] = child
      elif child.nodeType in [Node.TEXT_NODE, Node.CDATA_SECTION_NODE]:
        # Fold all text/CDATA content into one synthetic 'detail' child,
        # skipping pure-whitespace text nodes that appear first.
        if 'detail' not in children:
          if (child.nodeType == Node.CDATA_SECTION_NODE or
              not child.nodeValue.isspace()):
            children['detail'] = child.ownerDocument.createCDATASection(
                child.nodeValue)
        else:
          children['detail'].nodeValue += child.nodeValue
      else:
        self.fail('Encountered unexpected node type %d' % child.nodeType)
    return children

  def NormalizeXml(self, element):
    """
    Normalizes Google Test's XML output to eliminate references to transient
    information that may change from run to run.

    * The "time" attribute of <testsuites>, <testsuite> and <testcase>
      elements is replaced with a single asterisk, if it contains
      only digit characters.
    * The "timestamp" attribute of <testsuites> elements is replaced with a
      single asterisk, if it contains a valid ISO8601 datetime value.
    * The "type_param" attribute of <testcase> elements is replaced with a
      single asterisk (if it is non-empty) as it is the type name returned
      by the compiler and is platform dependent.
    * The line info reported in the first line of the "message"
      attribute and CDATA section of <failure> elements is replaced with the
      file's basename and a single asterisk for the line number.
    * The directory names in file paths are removed.
    * The stack traces are removed.
    """

    if element.tagName == 'testsuites':
      timestamp = element.getAttributeNode('timestamp')
      timestamp.value = re.sub(r'^\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d$',
                               '*', timestamp.value)
    if element.tagName in ('testsuites', 'testsuite', 'testcase'):
      time = element.getAttributeNode('time')
      time.value = re.sub(r'^\d+(\.\d+)?$', '*', time.value)
      type_param = element.getAttributeNode('type_param')
      if type_param and type_param.value:
        type_param.value = '*'
    elif element.tagName == 'failure':
      source_line_pat = r'^.*[/\\](.*:)\d+\n'
      # Replaces the source line information with a normalized form.
      message = element.getAttributeNode('message')
      message.value = re.sub(source_line_pat, '\\1*\n', message.value)
      for child in element.childNodes:
        if child.nodeType == Node.CDATA_SECTION_NODE:
          # Replaces the source line information with a normalized form.
          cdata = re.sub(source_line_pat, '\\1*\n', child.nodeValue)
          # Removes the actual stack trace.
          child.nodeValue = re.sub(r'\nStack trace:\n(.|\n)*',
                                   '', cdata)
    # Recurse into every child element.
    for child in element.childNodes:
      if child.nodeType == Node.ELEMENT_NODE:
        self.NormalizeXml(child)
| bsd-3-clause |
jaruba/chromium.src | tools/telemetry/telemetry/image_processing/screen_finder_unittest.py | 12 | 15808 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import math
import os
import unittest
from telemetry.core import util
from telemetry.util import external_modules
try:
np = external_modules.ImportRequiredModule('numpy')
cv2 = external_modules.ImportRequiredModule('cv2')
except (ImportError, NotImplementedError) as err:
pass
else:
# pylint: disable=W0212
class ScreenFinderTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(ScreenFinderTest, self).__init__(*args, **kwargs)
# Import modules with dependencies that may not be preset in test setup so
# that importing this unit test doesn't cause the test runner to raise an
# exception.
from telemetry.image_processing import screen_finder
from telemetry.image_processing.io import fake_frame_generator
from telemetry.image_processing.io import video_file_frame_generator
self.FakeFrameGenerator = fake_frame_generator.FakeFrameGenerator
self.VideoFileFrameGenerator = \
video_file_frame_generator.VideoFileFrameGenerator
self.ScreenFinder = screen_finder.ScreenFinder
def _GetScreenFinder(self, video_filename):
if not video_filename:
fg = self.FakeFrameGenerator()
else:
vid = os.path.join(util.GetUnittestDataDir(), video_filename)
fg = self.VideoFileFrameGenerator(vid)
return self.ScreenFinder(fg)
def testBasicFunctionality(self):
def CheckCorners(corners, expected):
for i in xrange(len(corners)):
for j in xrange(len(corners[i])):
self.assertAlmostEqual(corners[i][j], expected[i][j], delta=1.1)
expected = [[314, 60], [168, 58], [162, 274], [311, 276]]
sf = self._GetScreenFinder('screen_3_frames.mov')
self.assertTrue(sf.HasNext())
screen, corners = sf.GetNext()
CheckCorners(corners, expected)
self.assertIsNotNone(screen)
height, width = screen.shape[:2]
self.assertAlmostEqual(height, 226, delta=2)
self.assertAlmostEqual(width, 156, delta=2)
self.assertTrue(sf.HasNext())
screen, corners = sf.GetNext()
CheckCorners(corners, expected)
self.assertIsNotNone(screen)
height1, width1 = screen.shape[:2]
self.assertEqual(width, width1)
self.assertEqual(height, height1)
self.assertTrue(sf.HasNext())
screen, corners = sf.GetNext()
CheckCorners(corners, expected)
self.assertIsNotNone(screen)
height2, width2 = screen.shape[:2]
self.assertEqual(width, width2)
self.assertEqual(height, height2)
self.assertFalse(sf.HasNext())
error = ''
try:
sf.GetNext()
except RuntimeError as e:
error = str(e)
self.assertEqual(error, 'No more frames available.')
def testHasMovedFast(self):
sf = self._GetScreenFinder(None)
prev_corners = np.asfarray(([1000, 1000], [0, 1000], [0, 0], [1000, 0]))
self.assertFalse(sf._HasMovedFast(prev_corners, prev_corners))
not_moved = copy.deepcopy(prev_corners)
not_moved[0][1] += 1
not_moved[1][1] += 1
not_moved[3][0] += 0.9
self.assertFalse(sf._HasMovedFast(not_moved, prev_corners))
moved = copy.deepcopy(prev_corners)
moved[0][1] += math.sqrt(0.5)
moved[0][0] += math.sqrt(0.5)
moved[1][1] += 2.1
self.assertTrue(sf._HasMovedFast(moved, prev_corners))
def testPointConnectsToCorners(self):
sf = self._GetScreenFinder(None)
line1 = np.asfarray(((0, 0, 1, 0)))
line2 = np.asfarray(((0, 0, 0, 1)))
point = np.asfarray((0, 0))
point_info = (point, line1, line2)
corners = np.asfarray(((1, 0), (0, 1)))
self.assertFalse(sf._PointConnectsToCorners(corners, point_info, 1))
corners = np.append(corners, (100, 1))
corners = np.append(corners, (1, 100))
corners = corners.reshape(-1, 2)
self.assertTrue(sf._PointConnectsToCorners(corners, point_info, 2))
self.assertFalse(sf._PointConnectsToCorners(corners, point_info, 0.5))
corners = np.append(corners, (100, 0))
corners = np.append(corners, (0, 100))
corners = corners.reshape(-1, 2)
self.assertTrue(sf._PointConnectsToCorners(corners, point_info, 0))
def testFindIntersections(self):
def _BuildResult(point, line1, line2):
return [point, np.asfarray(line1).tolist(), np.asfarray(line2).tolist()]
def _IntersectionResultsToList(results):
result_list = []
for result in results:
point, line1, line2 = result
p = np.round(point).tolist()
l1 = np.round(line1).tolist()
l2 = np.round(line2).tolist()
result_list.append([p, l1, l2])
return result_list
sf = self._GetScreenFinder(None)
expected = []
lines = []
# Box with corners at (0, 0), (1000, 0), (0, 1000), (1000, 1000)
lines.append(np.asfarray(((0, 1001, 0, -1))))
lines.append(np.asfarray(((-1, 0, 1001, 0))))
lines.append(np.asfarray(((1000, 1001, 1000, -1))))
lines.append(np.asfarray(((-1, 1000, 1001, 1000))))
expected.append(_BuildResult([0, 0], lines[0], lines[1]))
expected.append(_BuildResult([0, 1000], lines[0], lines[3]))
expected.append(_BuildResult([1000, 0], lines[1], lines[2]))
expected.append(_BuildResult([1000, 1000], lines[2], lines[3]))
# crosses 2 lines at 45 degrees.
lines.append(np.asfarray(((0, 500, 500, 0))))
expected.append(_BuildResult([0, 500], lines[0], lines[4]))
expected.append(_BuildResult([500, 0], lines[1], lines[4]))
# crosses 1 line at > 45 degrees, 1 line at < 45 degrees.
lines.append(np.asfarray(((0, 400, 600, 0))))
expected.append(_BuildResult([0, 400], lines[0], lines[5]))
# Test without previous corner data, all intersections should be found.
results = sf._FindIntersections(lines)
result_list = _IntersectionResultsToList(results)
for e in expected:
self.assertIn(e, result_list)
self.assertEqual(len(expected), len(result_list))
# Now introduce previous corners, but also reset conditions. No
# intersections should be lost.
corners = ((1000, 1000), (0, 1000), (0, 0), (1000, 0))
sf._prev_corners = np.asfarray(corners, np.float32)
sf._lost_corner_frames = sf.RESET_AFTER_N_BAD_FRAMES + 1
results = sf._FindIntersections(lines)
result_list = _IntersectionResultsToList(results)
for e in expected:
self.assertIn(e, result_list)
self.assertEqual(len(expected), len(result_list))
# Remove reset conditions, so intersections not near corners will be lost.
sf._lost_corner_frames = sf.RESET_AFTER_N_BAD_FRAMES
# First 4 intersections are the ones at the old corner locations.
expected = expected[0:4]
results = sf._FindIntersections(lines)
result_list = _IntersectionResultsToList(results)
for e in expected:
self.assertIn(e, result_list)
self.assertEqual(len(expected), len(result_list))
def testPointIsCloseToPreviousCorners(self):
sf = self._GetScreenFinder(None)
corners = ((1000, 1000), (0, 1000), (0, 0), (1000, 0))
sf._prev_corners = np.asfarray(corners, np.float32)
dist = math.sqrt(sf.MAX_INTERFRAME_MOTION)
sidedist1 = math.sqrt(sf.MAX_INTERFRAME_MOTION) / math.sqrt(2) - (1e-13)
sidedist2 = math.sqrt(sf.MAX_INTERFRAME_MOTION) / math.sqrt(2) + (1e-13)
point1 = (corners[3][0] + dist, corners[3][1])
self.assertTrue(sf._PointIsCloseToPreviousCorners(point1))
point2 = (corners[3][0] + sidedist1, corners[3][1] + sidedist1)
self.assertTrue(sf._PointIsCloseToPreviousCorners(point2))
point3 = (corners[1][0] + sidedist2, corners[1][1] + sidedist2)
self.assertFalse(sf._PointIsCloseToPreviousCorners(point3))
def testLooksLikeCorner(self):
# TODO: Probably easier to just do end to end tests.
pass
def testCornerData(self):
  """CornerData stores its five fields positionally and sorts by corner index."""
  CornerData = self.ScreenFinder.CornerData
  data = CornerData('a', 'b', 'c', 'd', 'e')
  # Constructor arguments map one-to-one onto the named attributes.
  self.assertEqual(data.corner_index, 'a')
  self.assertEqual(data.corner_location, 'b')
  self.assertEqual(data.brightness_score, 'c')
  self.assertEqual(data.line1, 'd')
  self.assertEqual(data.line2, 'e')
  # Sorting a list of CornerData orders entries by corner_index.
  unordered = [CornerData(index, None, None, None, None)
               for index in (0, 3, 1, 2)]
  unordered.sort()
  for expected_index, entry in enumerate(unordered):
    self.assertEqual(expected_index, entry.corner_index)
def testFindCorners(self):
  # Intentionally empty: full corner finding needs realistic frames and is
  # expected to be covered by end-to-end tests instead of a unit test.
  # TODO: Probably easier to just do end to end tests.
  pass
def testDeDupCorners(self):
  """_DeDupCorners keeps one CornerData per corner index, preferring entries
  whose corner is connected (shares a line) with other detected corners."""
  sf = self._GetScreenFinder(None)
  data = []
  lines = []
  # Lines forming a 1000x1000 box, plus a short diagonal near the origin
  # and an extra horizontal line just above the box (creates duplicates).
  lines.append(np.asfarray((0, 1001, 0, -1)))
  lines.append(np.asfarray((-1, 0, 1001, 0)))
  lines.append(np.asfarray((1000, 1001, 1000, -1)))
  lines.append(np.asfarray((-1, 1000, 1001, 1000)))
  lines.append(np.asfarray((0, 10, 10, 0)))
  lines.append(np.asfarray((-1, 1001, 1001, 1001)))
  corners = np.asfarray(((1000, 1000), (0, 1000), (0, 0),
                         (1000, 0), (0, 10), (10, 0), (1000, 1001)))
  # Candidates: one per box corner, plus two competing candidates for
  # corner index 2 and one competing candidate for corner index 0.
  data.append(self.ScreenFinder.CornerData(2, corners[2], 100,
                                           lines[0], lines[1]))
  data.append(self.ScreenFinder.CornerData(1, corners[1], 100,
                                           lines[0], lines[3]))
  data.append(self.ScreenFinder.CornerData(3, corners[3], 100,
                                           lines[1], lines[2]))
  data.append(self.ScreenFinder.CornerData(0, corners[0], 100,
                                           lines[2], lines[3]))
  data.append(self.ScreenFinder.CornerData(2, corners[4], 120,
                                           lines[0], lines[4]))
  data.append(self.ScreenFinder.CornerData(2, corners[5], 110,
                                           lines[1], lines[4]))
  data.append(self.ScreenFinder.CornerData(0, corners[6], 110,
                                           lines[2], lines[5]))
  dedup = copy.copy(data)
  # Tests 2 non-duplicate corners, 1 corner with connected and unconnected
  # corners, and 1 corner with two connected corners.
  sf._DeDupCorners(dedup, corners)
  self.assertEqual(len(dedup), 4)
  self.assertIn(data[0], dedup)
  self.assertIn(data[1], dedup)
  self.assertIn(data[2], dedup)
  self.assertIn(data[6], dedup)
  # Same test, but this time the corner with connected and unconnected
  # corners now only contains unconnected corners.
  del data[0]
  corners = np.delete(corners, 2, axis=0)
  dedup2 = copy.copy(data)
  sf._DeDupCorners(dedup2, corners)
  self.assertEqual(len(dedup2), 4)
  self.assertIn(data[3], dedup2)
  self.assertIn(data[0], dedup2)
  self.assertIn(data[1], dedup2)
  self.assertIn(data[5], dedup2)
def testFindExactCorners(self):
  """_FindExactCorners snaps rough corner estimates onto nearby edge pixels.

  Uses tiny 3x3 edge images; all four rough corners start at the center
  (1, 1) and should land on distinct lit edge pixels.
  """
  sf = self._GetScreenFinder(None)
  # Edge pixels at the four mid-sides of a 3x3 frame.
  img = np.zeros((3, 3), np.uint8)
  img[1][0] = 255
  img[0][1] = 255
  img[1][2] = 255
  img[2][1] = 255
  sf._frame_edges = img
  corners = np.asfarray([(1, 1), (1, 1), (1, 1), (1, 1)])
  expected = np.asfarray([(2, 0), (0, 0), (0, 2), (2, 2)])
  ret = sf._FindExactCorners(corners)
  np.testing.assert_equal(ret, expected)
  # Second pattern: an asymmetric set of edge pixels gives different snaps.
  img2 = np.zeros((3, 3), np.uint8)
  img2[1][0] = 255
  img2[1][1] = 255
  img2[2][2] = 255
  img2[2][1] = 255
  sf._frame_edges = img2
  expected2 = [(2, 1), (0, 1), (0, 2), (2, 2)]
  ret2 = sf._FindExactCorners(corners)
  np.testing.assert_equal(ret2, expected2)
def testSmoothCorners(self):
  """_SmoothCorners blends new corner positions with prior state.

  The first call returns the input unchanged (seeds the smoother); later
  calls return values halfway between the previous smoothed position and
  the new input — consistent with a 0.5-weight moving average (NOTE:
  inferred from the expected values below; confirm against the
  implementation).
  """
  sf = self._GetScreenFinder(None)
  corners = [[10, 10], [10, 10], [10, 10], [10, 10]]
  ret = sf._SmoothCorners(corners).tolist()
  self.assertListEqual(ret, corners)
  # Feeding (0, 0) after (10, 10) yields the midpoint (5, 5)...
  corners = [[0, 0], [0, 0], [0, 0], [0, 0]]
  expected = [[5, 5], [5, 5], [5, 5], [5, 5]]
  ret = sf._SmoothCorners(corners).tolist()
  self.assertListEqual(ret, expected)
  # ...and a second (0, 0) halves the distance again to (2.5, 2.5).
  expected = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
  ret = sf._SmoothCorners(corners).tolist()
  self.assertListEqual(ret, expected)
def testGetTransform(self):
  """_GetTransform maps screen-corner quads to an upright output image.

  Returns a 3x3 homography plus the output width and height. The second
  argument appears to be a border margin added around the screen (w/h
  grow by 2x that value scaled — TODO confirm against implementation).
  """
  sf = self._GetScreenFinder(None)
  # 100x1000 quad, margin 1: identity scale with a y-flip and 1px offset.
  corners = np.array([[100, 1000], [0, 1000], [0, 0], [100, 0]], np.float32)
  transform, w, h = sf._GetTransform(corners, 1)
  transform = np.round(transform, 2)
  expected = [[1., 0., 1.], [-0., -1., 1001.], [0., -0., 1.]]
  self.assertListEqual(transform.tolist(), expected)
  self.assertEqual(w, 102)
  self.assertEqual(h, 1002)
  # 200x2000 quad, margin 5: output is downscaled by 0.5.
  corners = np.array([(200, 2000), (0, 2000), (0, 0), (200, 0)], np.float32)
  transform, w, h = sf._GetTransform(corners, 5)
  transform = np.round(transform, 2)
  expected = [[0.5, 0.0, 5.0], [-0.0, -0.5, 1005.0], [-0.0, 0.0, 1.0]]
  self.assertListEqual(transform.tolist(), expected)
  self.assertEqual(w, 110)
  self.assertEqual(h, 1010)
def testNewScreenLocation(self):
  """_NewScreenLocation fills in missing (NaN) corners.

  Missing corners are recovered from line intersections near the
  previous frame's corners; with no usable history the method raises
  ScreenNotFoundError. It also updates the per-corner lost flags and the
  consecutive lost-corner frame counter.
  """
  sf = self._GetScreenFinder(None)
  # Quads with 2, 3, and 4 known corners (NaN marks a missing corner).
  corners_2 = np.asfarray([[np.nan, np.nan], [0, 1000], [np.nan, np.nan],
                           [1000, 0]])
  corners_3 = np.asfarray([[1000, 1000], [0, 1000], [np.nan, np.nan],
                           [1000, 0]])
  corners_4 = np.asfarray([[1000, 1000], [0, 1000], [0, 0], [1000, 0]])
  lines = []
  # Box with corners at (0, 0), (1000, 0), (0, 1000), (1000, 1000)
  lines.append(np.asfarray(((0, 1001, 0, -1))))
  lines.append(np.asfarray(((-1, 0, 1001, 0))))
  lines.append(np.asfarray(((1000, 1001, 1000, -1))))
  lines.append(np.asfarray(((-1, 1000, 1001, 1000))))
  # Additional intersections near a corner.
  lines.append(np.asfarray(((0, 3, 7, 0))))
  lines.append(np.asfarray(((0, 4, 6, 0))))
  intersections = sf._FindIntersections(lines)
  # Without previous corners, a missing corner cannot be recovered.
  failed = False
  try:
    sf._NewScreenLocation(corners_3, 1, intersections)
  except self.ScreenFinder.ScreenNotFoundError:
    failed = True
  self.assertTrue(failed)
  # A fully-found frame resets the lost-corner bookkeeping.
  sf._lost_corner_frames = 10
  sf._lost_corners = [True, True, True, True]
  ret = sf._NewScreenLocation(corners_4, 0, intersections)
  np.testing.assert_equal(ret, corners_4)
  self.assertListEqual(sf._lost_corners, [False, False, False, False])
  self.assertEqual(sf._lost_corner_frames, 0)
  # One missing corner is recovered from an intersection near its
  # previous location; the lost flag and frame counter advance.
  sf._prev_corners = corners_4
  ret = sf._NewScreenLocation(corners_3, 1, intersections)
  ret = np.round(ret)
  np.testing.assert_equal(ret, corners_4)
  self.assertListEqual(sf._lost_corners, [False, False, True, False])
  self.assertEqual(sf._lost_corner_frames, 1)
  # Recovery still works when the previous corner was slightly off.
  sf._prev_corners = np.asfarray([(1000, 1000), (0, 1000),
                                  (0, 3), (1000, 0)])
  ret = sf._NewScreenLocation(corners_3, 1, intersections)
  ret = np.round(ret)
  np.testing.assert_equal(ret, corners_4)
  self.assertListEqual(sf._lost_corners, [False, False, True, False])
  self.assertEqual(sf._lost_corner_frames, 2)
  # With two missing corners, each is filled from the closest candidate
  # to its previous position (corner 2 lands on (0, 3) here).
  ret = sf._NewScreenLocation(corners_2, 2, intersections)
  ret = np.round(ret)
  expected = [[1000, 1000], [0, 1000], [0, 3], [1000, 0]]
  np.testing.assert_equal(ret, expected)
  self.assertListEqual(sf._lost_corners, [True, False, True, False])
  self.assertEqual(sf._lost_corner_frames, 3)
| bsd-3-clause |
dongguangming/youtube-dl | youtube_dl/extractor/gamestar.py | 123 | 2627 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_duration,
str_to_int,
unified_strdate,
)
class GameStarIE(InfoExtractor):
    """youtube-dl extractor for videos hosted on gamestar.de.

    NOTE(review): some of the regexes below may contain non-breaking
    spaces (&nbsp; characters) around the German labels — preserve them
    exactly when editing.
    """
    # Pages look like /videos/<category>,<n>/<slug>,<id>.html; only the
    # trailing numeric id is captured.
    _VALID_URL = r'http://www\.gamestar\.de/videos/.*,(?P<id>[0-9]+)\.html'
    _TEST = {
        'url': 'http://www.gamestar.de/videos/trailer,3/hobbit-3-die-schlacht-der-fuenf-heere,76110.html',
        'md5': '96974ecbb7fd8d0d20fca5a00810cea7',
        'info_dict': {
            'id': '76110',
            'ext': 'mp4',
            'title': 'Hobbit 3: Die Schlacht der Fünf Heere - Teaser-Trailer zum dritten Teil',
            'description': 'Der Teaser-Trailer zu Hobbit 3: Die Schlacht der Fünf Heere zeigt einige Szenen aus dem dritten Teil der Saga und kündigt den vollständigen Trailer an.',
            'thumbnail': 'http://images.gamestar.de/images/idgwpgsgp/bdb/2494525/600x.jpg',
            'upload_date': '20140728',
            'duration': 17
        }
    }

    def _real_extract(self, url):
        # Download the page and scrape metadata from its OpenGraph tags
        # and German-language markup.
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        og_title = self._og_search_title(webpage)
        # Strip the site-name suffix GameStar appends to page titles.
        title = re.sub(r'\s*- Video (bei|-) GameStar\.de$', '', og_title)

        # The media URL is served by a separate portal endpoint keyed on
        # the numeric video id (premium=0 requests the free stream).
        url = 'http://gamestar.de/_misc/videos/portal/getVideoUrl.cfm?premium=0&videoId=' + video_id

        description = self._og_search_description(webpage).strip()

        thumbnail = self._proto_relative_url(
            self._og_search_thumbnail(webpage), scheme='http:')

        # The fields below are optional (fatal=False): page layout may
        # change, and the entry is still useful without them.
        upload_date = unified_strdate(self._html_search_regex(
            r'<span style="float:left;font-size:11px;">Datum: ([0-9]+\.[0-9]+\.[0-9]+)  ',
            webpage, 'upload_date', fatal=False))

        duration = parse_duration(self._html_search_regex(
            r' Länge: ([0-9]+:[0-9]+)</span>', webpage, 'duration',
            fatal=False))

        view_count = str_to_int(self._html_search_regex(
            r' Zuschauer: ([0-9\.]+) ', webpage,
            'view_count', fatal=False))

        comment_count = int_or_none(self._html_search_regex(
            r'>Kommentieren \(([0-9]+)\)</a>', webpage, 'comment_count',
            fatal=False))

        return {
            'id': video_id,
            'title': title,
            'url': url,
            'ext': 'mp4',
            'thumbnail': thumbnail,
            'description': description,
            'upload_date': upload_date,
            'duration': duration,
            'view_count': view_count,
            'comment_count': comment_count
        }
| unlicense |
bmbouter/kombu | kombu/tests/async/http/test_curl.py | 8 | 5130 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from kombu.async.http.curl import READ, WRITE, CurlClient
from kombu.tests.case import (
HubCase, Mock, call, patch, case_no_pypy, case_requires, set_module_symbol,
)
@case_no_pypy
@case_requires('pycurl')
class test_CurlClient(HubCase):
    """Unit tests for kombu's pycurl-based async HTTP client.

    pycurl is patched throughout, so these tests exercise CurlClient's
    bookkeeping (fd registration, timers, pending queue) rather than
    real network I/O.
    """

    class Client(CurlClient):
        # Replace the real pycurl handle factory with a mock.
        Curl = Mock(name='Curl')

    def test_when_pycurl_missing(self):
        # Constructing the client without pycurl installed must fail fast.
        with set_module_symbol('kombu.async.http.curl', 'pycurl', None):
            with self.assertRaises(ImportError):
                self.Client()

    def test_max_clients_set(self):
        x = self.Client(max_clients=303)
        self.assertEqual(x.max_clients, 303)

    def test_init(self):
        # __init__ must set up the multi handle, queues, a curl handle per
        # client slot, the timeout timer, and the pycurl callbacks.
        with patch('kombu.async.http.curl.pycurl') as _pycurl:
            x = self.Client()
            self.assertIsNotNone(x._multi)
            self.assertIsNotNone(x._pending)
            self.assertIsNotNone(x._free_list)
            self.assertIsNotNone(x._fds)
            self.assertEqual(
                x._socket_action, x._multi.socket_action,
            )
            self.assertEqual(len(x._curls), x.max_clients)
            self.assertTrue(x._timeout_check_tref)
            x._multi.setopt.assert_has_calls([
                call(_pycurl.M_TIMERFUNCTION, x._set_timeout),
                call(_pycurl.M_SOCKETFUNCTION, x._handle_socket),
            ])

    def test_close(self):
        # close() cancels the timer and closes every curl handle plus the
        # multi handle.
        with patch('kombu.async.http.curl.pycurl'):
            x = self.Client()
            x._timeout_check_tref = Mock(name='timeout_check_tref')
            x.close()
            x._timeout_check_tref.cancel.assert_called_with()
            for _curl in x._curls:
                _curl.close.assert_called_with()
            x._multi.close.assert_called_with()

    def test_add_request(self):
        # add_request enqueues, kicks the queue, and arms an immediate
        # timeout so the request is processed on the next loop tick.
        with patch('kombu.async.http.curl.pycurl'):
            x = self.Client()
            x._process_queue = Mock(name='_process_queue')
            x._set_timeout = Mock(name='_set_timeout')
            request = Mock(name='request')
            x.add_request(request)
            self.assertIn(request, x._pending)
            x._process_queue.assert_called_with()
            x._set_timeout.assert_called_with(0)

    def test_handle_socket(self):
        # _handle_socket translates pycurl poll events into hub
        # reader/writer registrations and tracks state in x._fds.
        with patch('kombu.async.http.curl.pycurl') as _pycurl:
            hub = Mock(name='hub')
            x = self.Client(hub)
            fd = Mock(name='fd1')

            # POLL_REMOVE
            x._fds[fd] = fd
            x._handle_socket(_pycurl.POLL_REMOVE, fd, x._multi, None, _pycurl)
            hub.remove.assert_called_with(fd)
            self.assertNotIn(fd, x._fds)
            # Removing an unknown fd is a no-op (no KeyError).
            x._handle_socket(_pycurl.POLL_REMOVE, fd, x._multi, None, _pycurl)

            # POLL_IN
            hub = x.hub = Mock(name='hub')
            fds = [fd, Mock(name='fd2'), Mock(name='fd3')]
            x._fds = {f: f for f in fds}
            x._handle_socket(_pycurl.POLL_IN, fd, x._multi, None, _pycurl)
            hub.remove.assert_has_calls([call(fd)])
            hub.add_reader.assert_called_with(fd, x.on_readable, fd)
            self.assertEqual(x._fds[fd], READ)

            # POLL_OUT
            hub = x.hub = Mock(name='hub')
            x._handle_socket(_pycurl.POLL_OUT, fd, x._multi, None, _pycurl)
            hub.add_writer.assert_called_with(fd, x.on_writable, fd)
            self.assertEqual(x._fds[fd], WRITE)

            # POLL_INOUT registers both directions.
            hub = x.hub = Mock(name='hub')
            x._handle_socket(_pycurl.POLL_INOUT, fd, x._multi, None, _pycurl)
            hub.add_reader.assert_called_with(fd, x.on_readable, fd)
            hub.add_writer.assert_called_with(fd, x.on_writable, fd)
            self.assertEqual(x._fds[fd], READ | WRITE)

            # UNKNOWN EVENT
            hub = x.hub = Mock(name='hub')
            x._handle_socket(0xff3f, fd, x._multi, None, _pycurl)

            # FD NOT IN FDS
            hub = x.hub = Mock(name='hub')
            x._fds.clear()
            x._handle_socket(0xff3f, fd, x._multi, None, _pycurl)
            self.assertFalse(hub.remove.called)

    def test_set_timeout(self):
        # Smoke test: must not raise.
        x = self.Client()
        x._set_timeout(100)

    def test_timeout_check(self):
        # _timeout_check must tolerate both a normal socket_all result and
        # a pycurl.error being raised.
        with patch('kombu.async.http.curl.pycurl') as _pycurl:
            x = self.Client()
            x._process_pending_requests = Mock(name='process_pending')
            x._multi.socket_all.return_value = 333, 1
            _pycurl.error = KeyError
            x._timeout_check(_pycurl=_pycurl)
            x._multi.socket_all.return_value = None
            x._multi.socket_all.side_effect = _pycurl.error(333)
            x._timeout_check(_pycurl=_pycurl)

    def test_on_readable_on_writeable(self):
        # Both callbacks funnel into _on_event with the matching
        # CSELECT_* flag.
        with patch('kombu.async.http.curl.pycurl') as _pycurl:
            x = self.Client()
            x._on_event = Mock(name='on_event')
            fd = Mock(name='fd')
            x.on_readable(fd, _pycurl=_pycurl)
            x._on_event.assert_called_with(fd, _pycurl.CSELECT_IN)
            x.on_writable(fd, _pycurl=_pycurl)
            x._on_event.assert_called_with(fd, _pycurl.CSELECT_OUT)
| bsd-3-clause |
coolceph/gtest | test/gtest_throw_on_failure_test.py | 2917 | 5766 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's throw-on-failure mode with exceptions disabled.
This script invokes gtest_throw_on_failure_test_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# Constants.
# The command line flag for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE = 'gtest_throw_on_failure'
# Path to the gtest_throw_on_failure_test_ program, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_throw_on_failure_test_')
# Utilities.
def SetEnvVar(env_var, value):
  """Sets or clears an environment variable.

  The variable name is upper-cased first. A value of None removes the
  variable if it is present; any other value is stored as-is.
  """

  name = env_var.upper()
  if value is None:
    # Only delete when present; deleting a missing key would raise.
    if name in os.environ:
      del os.environ[name]
  else:
    os.environ[name] = value
def Run(command):
  """Runs a command; returns True/False if its exit code is/isn't 0."""

  # Python 2 print statement: this script predates Python 3 support.
  print 'Running "%s". . .' % ' '.join(command)
  p = gtest_test_utils.Subprocess(command)
  # Both conditions needed: the process must have exited (not been
  # killed by a signal) AND returned exit code 0.
  return p.exited and p.exit_code == 0
# The tests. TODO(wan@google.com): refactor the class to share common
# logic with code in gtest_break_on_failure_unittest.py.
class ThrowOnFailureTest(gtest_test_utils.TestCase):
  """Tests the throw-on-failure mode."""

  def RunAndVerify(self, env_var_value, flag_value, should_fail):
    """Runs gtest_throw_on_failure_test_ and verifies that it does
    (or does not) exit with a non-zero code.

    Args:
      env_var_value:    value of the GTEST_BREAK_ON_FAILURE environment
                        variable; None if the variable should be unset.
      flag_value:       value of the --gtest_break_on_failure flag;
                        None if the flag should not be present.
      should_fail:      True iff the program is expected to fail.
    """

    SetEnvVar(THROW_ON_FAILURE, env_var_value)

    # Human-readable description of the env var state, for the message.
    if env_var_value is None:
      env_var_value_msg = ' is not set'
    else:
      env_var_value_msg = '=' + env_var_value

    # Translate the flag value into the actual command-line flag.
    if flag_value is None:
      flag = ''
    elif flag_value == '0':
      flag = '--%s=0' % THROW_ON_FAILURE
    else:
      flag = '--%s' % THROW_ON_FAILURE

    command = [EXE_PATH]
    if flag:
      command.append(flag)

    if should_fail:
      should_or_not = 'should'
    else:
      should_or_not = 'should not'

    failed = not Run(command)

    # Restore a clean environment for the next invocation.
    SetEnvVar(THROW_ON_FAILURE, None)

    msg = ('when %s%s, an assertion failure in "%s" %s cause a non-zero '
           'exit code.' %
           (THROW_ON_FAILURE, env_var_value_msg, ' '.join(command),
            should_or_not))
    self.assert_(failed == should_fail, msg)

  def testDefaultBehavior(self):
    """Tests the behavior of the default mode."""

    self.RunAndVerify(env_var_value=None, flag_value=None, should_fail=False)

  def testThrowOnFailureEnvVar(self):
    """Tests using the GTEST_THROW_ON_FAILURE environment variable."""

    self.RunAndVerify(env_var_value='0',
                      flag_value=None,
                      should_fail=False)
    self.RunAndVerify(env_var_value='1',
                      flag_value=None,
                      should_fail=True)

  def testThrowOnFailureFlag(self):
    """Tests using the --gtest_throw_on_failure flag."""

    self.RunAndVerify(env_var_value=None,
                      flag_value='0',
                      should_fail=False)
    self.RunAndVerify(env_var_value=None,
                      flag_value='1',
                      should_fail=True)

  def testThrowOnFailureFlagOverridesEnvVar(self):
    """Tests that --gtest_throw_on_failure overrides GTEST_THROW_ON_FAILURE."""

    # All four env-var/flag combinations: the flag always wins.
    self.RunAndVerify(env_var_value='0',
                      flag_value='0',
                      should_fail=False)
    self.RunAndVerify(env_var_value='0',
                      flag_value='1',
                      should_fail=True)
    self.RunAndVerify(env_var_value='1',
                      flag_value='0',
                      should_fail=False)
    self.RunAndVerify(env_var_value='1',
                      flag_value='1',
                      should_fail=True)
# Entry point: delegate to the shared gtest test runner.
if __name__ == '__main__':
  gtest_test_utils.Main()
| bsd-3-clause |
kelvintaywl/tasty.py | recipe/base.py | 1 | 1576 | # -*- coding: utf-8 -*-
class Recipe(object):
    """Base class for printable recipes.

    Subclasses override INGREDIENTS — a mapping of "item|unit" keys to
    per-portion quantities — and COOKING_TIME (minutes). Quantities are
    scaled by the requested number of portions.
    """

    INGREDIENTS = {}
    COOKING_TIME = 30  # minutes

    def __init__(self, portion=1, steps=None, notes=""):
        """Builds a recipe instance.

        Args:
            portion: number of servings; quantities are multiplied by it
                when it is greater than 1.
            steps: ordered list of preparation steps (defaults to []).
            notes: free-form remarks printed at the end of the recipe.
        """
        self.portion = portion
        # Copy the class-level mapping before scaling. The previous code
        # aliased INGREDIENTS and scaled it in place, which corrupted the
        # shared class attribute for every instance created afterwards.
        self.ingredients = dict(self.__class__.INGREDIENTS)
        if self.portion > 1:
            self.ingredients = {key: value * self.portion
                                for key, value in self.ingredients.items()}
        # Avoid the mutable-default-argument pitfall: a shared [] default
        # would leak appended steps between instances.
        self.steps = steps if steps is not None else []
        self.notes = notes

    def _generate_recipe(self):
        """Returns (ingredient strings, steps, notes) for display."""
        ingredients = []
        for k, v in self.ingredients.items():
            # Keys are encoded as "item|unit"; split only on the first '|'.
            item, unit = k.split("|", 1)
            ingredients.append("%r%s of %s" % (v, unit, item))
        return ingredients, self.steps, self.notes

    def recipe(self):
        """Prints the formatted recipe to stdout."""
        ingredients, steps, notes = self._generate_recipe()
        print("--------------------------------------------")
        print("%s (approx %d minutes, serves %d)" %
              (self.__class__.__name__, self.__class__.COOKING_TIME, self.portion))
        print("============================================")
        print("INGREDIENTS (食材)")
        print("-----------------")
        for item in ingredients:
            print(item)
        print("============================================")
        print("STEPS (作り方)")
        print("-------------")
        for i, step in enumerate(steps, start=1):
            print("%d. %s" % (i, step))
        print("============================================")
        print("NOTES (注意点)")
        print(notes)
        print("============================================")
| mit |
ryfeus/lambda-packs | Keras_tensorflow_nightly/source2.7/tensorflow/contrib/learn/python/learn/estimators/estimator.py | 14 | 62939 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base Estimator class (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import copy
import os
import tempfile
import numpy as np
import six
from google.protobuf import message
from tensorflow.contrib import layers
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_args
from tensorflow.contrib.framework import list_variables
from tensorflow.contrib.framework import load_variable
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import monitors as monitor_lib
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import _sklearn as sklearn
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import metric_key
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import tensor_signature
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.learn_io import data_feeder
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.contrib.meta_graph_transform import meta_graph_transform
from tensorflow.contrib.training.python.training import evaluation
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.summary import summary as core_summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import device_setter
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver
from tensorflow.python.training import training_util
from tensorflow.python.util import compat
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
AS_ITERABLE_DATE = '2016-09-15'
AS_ITERABLE_INSTRUCTIONS = (
'The default behavior of predict() is changing. The default value for\n'
'as_iterable will change to True, and then the flag will be removed\n'
'altogether. The behavior of this flag is described below.')
SCIKIT_DECOUPLE_DATE = '2016-12-01'
SCIKIT_DECOUPLE_INSTRUCTIONS = (
'Estimator is decoupled from Scikit Learn interface by moving into\n'
'separate class SKCompat. Arguments x, y and batch_size are only\n'
'available in the SKCompat class, Estimator will only accept input_fn.\n'
'Example conversion:\n'
' est = Estimator(...) -> est = SKCompat(Estimator(...))')
def _verify_input_args(x, y, input_fn, feed_fn, batch_size):
  """Verifies validity of co-existence of input arguments.

  Exactly one input style is allowed: either in-memory arrays (x/y) or an
  input_fn. feed_fn and batch_size only make sense with arrays.

  Raises:
    ValueError: if the combination of arguments is inconsistent.
  """
  if input_fn is None:
    if x is None:
      raise ValueError('Either x or input_fn must be provided.')

    # Note: `and` binds tighter than `or`, so this reads as
    # is_tensor(x) or (y is not None and is_tensor(y)).
    if tensor_util.is_tensor(x) or y is not None and tensor_util.is_tensor(y):
      raise ValueError('Inputs cannot be tensors. Please provide input_fn.')

    if feed_fn is not None:
      raise ValueError('Can not provide both feed_fn and x or y.')
  else:
    if (x is not None) or (y is not None):
      raise ValueError('Can not provide both input_fn and x or y.')
    if batch_size is not None:
      raise ValueError('Can not provide both input_fn and batch_size.')
def _get_input_fn(x, y, input_fn, feed_fn, batch_size, shuffle=False, epochs=1):
  """Make inputs into input and feed functions.

  Args:
    x: Numpy, Pandas or Dask matrix or iterable.
    y: Numpy, Pandas or Dask matrix or iterable.
    input_fn: Pre-defined input function for training data.
    feed_fn: Pre-defined data feeder function.
    batch_size: Size to split data into parts. Must be >= 1.
    shuffle: Whether to shuffle the inputs.
    epochs: Number of epochs to run.

  Returns:
    Data input and feeder function based on training data.

  Raises:
    ValueError: Only one of `(x & y)` or `input_fn` must be provided.
  """
  _verify_input_args(x, y, input_fn, feed_fn, batch_size)
  # If the caller supplied an input_fn, pass it (and any feed_fn) through.
  if input_fn is not None:
    return input_fn, feed_fn

  # Otherwise wrap the in-memory data in a DataFeeder, which supplies
  # both the graph-building input_fn and the per-step feed_dict fn.
  df = data_feeder.setup_train_data_feeder(
      x,
      y,
      n_classes=None,
      batch_size=batch_size,
      shuffle=shuffle,
      epochs=epochs)
  return df.input_builder, df.get_feed_dict_fn()
@deprecated(None, 'Please specify feature columns explicitly.')
def infer_real_valued_columns_from_input_fn(input_fn):
  """Creates `FeatureColumn` objects for inputs defined by `input_fn`.

  This interprets all inputs as dense, fixed-length float values. This creates
  a local graph in which it calls `input_fn` to build the tensors, then discards
  it.

  Args:
    input_fn: Input function returning a tuple of:
        features - Dictionary of string feature name to `Tensor` or `Tensor`.
        labels - `Tensor` of label values.

  Returns:
    List of `FeatureColumn` objects.
  """
  # A throwaway graph so building the input tensors doesn't pollute the
  # caller's default graph; it is discarded when the context exits.
  with ops.Graph().as_default():
    features, _ = input_fn()
    return layers.infer_real_valued_columns(features)
@deprecated(None, 'Please specify feature columns explicitly.')
def infer_real_valued_columns_from_input(x):
  """Creates `FeatureColumn` objects for inputs defined by input `x`.

  This interprets all inputs as dense, fixed-length float values.

  Args:
    x: Real-valued matrix of shape [n_samples, n_features...]. Can be
       iterator that returns arrays of features.

  Returns:
    List of `FeatureColumn` objects.
  """
  # Wrap the raw data in an input_fn and delegate to the input_fn variant.
  input_fn, _ = _get_input_fn(
      x=x, y=None, input_fn=None, feed_fn=None, batch_size=None)
  return infer_real_valued_columns_from_input_fn(input_fn)
def _model_fn_args(fn):
  """Get argument names for function-like object.

  Args:
    fn: Function, or function-like object (e.g., result of `functools.partial`).

  Returns:
    `tuple` of string argument names.

  Raises:
    ValueError: if partial function has positionally bound arguments
  """
  _, fn = tf_decorator.unwrap(fn)
  if hasattr(fn, 'func') and hasattr(fn, 'keywords') and hasattr(fn, 'args'):
    # Handle functools.partial and similar objects.
    # Positional args already bound by the partial consume the leading
    # parameters; keyword-bound names are removed from the remainder.
    return tuple([
        arg for arg in tf_inspect.getargspec(fn.func).args[len(fn.args):]
        if arg not in set(fn.keywords.keys())
    ])
  # Handle function.
  return tuple(tf_inspect.getargspec(fn).args)
def _get_replica_device_setter(config):
  """Creates a replica device setter if required.

  Args:
    config: A RunConfig instance.

  Returns:
    A replica device setter, or None.
  """
  # Op types that should be placed on parameter servers (all variants of
  # variables and mutable hash tables).
  ps_ops = [
      'Variable', 'VariableV2', 'AutoReloadVariable', 'MutableHashTable',
      'MutableHashTableV2', 'MutableHashTableOfTensors',
      'MutableHashTableOfTensorsV2', 'MutableDenseHashTable',
      'MutableDenseHashTableV2', 'VarHandleOp'
  ]

  if config.task_type:
    worker_device = '/job:%s/task:%d' % (config.task_type, config.task_id)
  else:
    worker_device = '/job:worker'

  # Only distributed setups (at least one PS replica) need a device
  # setter; single-process training returns None.
  if config.num_ps_replicas > 0:
    return device_setter.replica_device_setter(
        ps_tasks=config.num_ps_replicas,
        worker_device=worker_device,
        merge_devices=True,
        ps_ops=ps_ops,
        cluster=config.cluster_spec)
  else:
    return None
def _make_metrics_ops(metrics, features, labels, predictions):
  """Add metrics based on `features`, `labels`, and `predictions`.

  `metrics` contains a specification for how to run metrics. It is a dict
  mapping friendly names to either `MetricSpec` objects, or directly to a metric
  function (assuming that `predictions` and `labels` are single tensors), or to
  `(pred_name, metric)` `tuple`, which passes `predictions[pred_name]` and
  `labels` to `metric` (assuming `labels` is a single tensor).

  Users are encouraged to use `MetricSpec` objects, which are more flexible and
  cleaner. They also lead to clearer errors.

  Args:
    metrics: A dict mapping names to metrics specification, for example
      `MetricSpec` objects.
    features: A dict of tensors returned from an input_fn as features/inputs.
    labels: A single tensor or a dict of tensors returned from an input_fn as
      labels.
    predictions: A single tensor or a dict of tensors output from a model as
      predictions.

  Returns:
    A dict mapping the friendly given in `metrics` to the result of calling the
    given metric function.

  Raises:
    ValueError: If metrics specifications do not work with the type of
      `features`, `labels`, or `predictions` provided. Mostly, a dict is given
      but no pred_name specified.
  """
  metrics = metrics or {}

  # If labels is a dict with a single key, unpack into a single tensor.
  labels_tensor_or_dict = labels
  if isinstance(labels, dict) and len(labels) == 1:
    labels_tensor_or_dict = labels[list(labels.keys())[0]]

  result = {}
  # Iterate in lexicographic order, so the graph is identical among runs.
  for name, metric in sorted(six.iteritems(metrics)):
    # Preferred path: MetricSpec knows how to wire itself up.
    if isinstance(metric, metric_spec.MetricSpec):
      result[name] = metric.create_metric_ops(features, labels, predictions)
      continue

    # TODO(b/31229024): Remove the rest of this loop
    logging.warning('Please specify metrics using MetricSpec. Using bare '
                    'functions or (key, fn) tuples is deprecated and support '
                    'for it will be removed on Oct 1, 2016.')

    if isinstance(name, tuple):
      # Multi-head metrics.
      if len(name) != 2:
        raise ValueError('Invalid metric for {}. It returned a tuple with '
                         'len {}, expected 2.'.format(name, len(name)))
      if not isinstance(predictions, dict):
        raise ValueError('Metrics passed provide (name, prediction), '
                         'but predictions are not dict. '
                         'Metrics: %s, Predictions: %s.' % (metrics,
                                                            predictions))
      # Here are two options: labels are single Tensor or a dict.
      if isinstance(labels, dict) and name[1] in labels:
        # If labels are dict and the prediction name is in it, apply metric.
        result[name[0]] = metric(predictions[name[1]], labels[name[1]])
      else:
        # Otherwise pass the labels to the metric.
        result[name[0]] = metric(predictions[name[1]], labels_tensor_or_dict)
    else:
      # Single head metrics.
      if isinstance(predictions, dict):
        raise ValueError('Metrics passed provide only name, no prediction, '
                         'but predictions are dict. '
                         'Metrics: %s, Labels: %s.' % (metrics,
                                                       labels_tensor_or_dict))
      result[name] = metric(predictions, labels_tensor_or_dict)
  return result
def _dict_to_str(dictionary):
"""Get a `str` representation of a `dict`.
Args:
dictionary: The `dict` to be represented as `str`.
Returns:
A `str` representing the `dictionary`.
"""
results = []
for k, v in sorted(dictionary.items()):
if isinstance(v, float) or isinstance(v, np.float32) or isinstance(
v, int) or isinstance(v, np.int64) or isinstance(v, np.int32):
results.append('%s = %s' % (k, v))
else:
results.append('Type of %s = %s' % (k, type(v)))
return ', '.join(results)
def _write_dict_to_summary(output_dir, dictionary, current_global_step):
  """Writes a `dict` into summary file in given output directory.

  Args:
    output_dir: `str`, directory to write the summary file in.
    dictionary: the `dict` to be written to summary file.
    current_global_step: `int`, the current global step.
  """
  logging.info('Saving dict for global step %d: %s', current_global_step,
               _dict_to_str(dictionary))
  # FileWriterCache presumably caches one writer per directory, so repeated
  # evaluations append to the same events file — TODO confirm.
  summary_writer = core_summary.FileWriterCache.get(output_dir)
  summary_proto = summary_pb2.Summary()
  for key in dictionary:
    # Skip missing values, and the step itself — it is the summary's step
    # coordinate, not a metric value.
    if dictionary[key] is None:
      continue
    if key == 'global_step':
      continue
    if (isinstance(dictionary[key], np.float32) or
        isinstance(dictionary[key], float)):
      summary_proto.value.add(tag=key, simple_value=float(dictionary[key]))
    elif (isinstance(dictionary[key], np.int64) or
          isinstance(dictionary[key], np.int32) or
          isinstance(dictionary[key], int)):
      summary_proto.value.add(tag=key, simple_value=int(dictionary[key]))
    elif isinstance(dictionary[key], six.string_types):
      # Strings are expected to hold a serialized `Summary` proto; re-tag its
      # values with this dict key before merging them in.
      try:
        summ = summary_pb2.Summary.FromString(dictionary[key])
        for i, _ in enumerate(summ.value):
          summ.value[i].tag = key
        summary_proto.value.extend(summ.value)
      except message.DecodeError:
        logging.warn('Skipping summary for %s, cannot parse string to Summary.',
                     key)
        continue
    elif isinstance(dictionary[key], np.ndarray):
      # Arrays are stored as tensor values; TensorBoard needs a plugin to
      # display these (see the logged note below).
      value = summary_proto.value.add()
      value.tag = key
      value.node_name = key
      tensor_proto = tensor_util.make_tensor_proto(dictionary[key])
      value.tensor.CopyFrom(tensor_proto)
      logging.info(
          'Summary for np.ndarray is not visible in Tensorboard by default. '
          'Consider using a Tensorboard plugin for visualization (see '
          'https://github.com/tensorflow/tensorboard-plugin-example/blob/master/README.md'
          ' for more information).')
    else:
      logging.warn(
          'Skipping summary for %s, must be a float, np.float32, np.int64, '
          'np.int32 or int or np.ndarray or a serialized string of Summary.',
          key)
  summary_writer.add_summary(summary_proto, current_global_step)
  summary_writer.flush()
# Describes one MetaGraphDef to emit from `Estimator.export_savedmodel`:
# `tags` for the MetaGraphDef, and `transforms` presumably naming the graph
# rewrites to apply to it (see the `graph_rewrite_specs` argument).
GraphRewriteSpec = collections.namedtuple('GraphRewriteSpec',
                                          ['tags', 'transforms'])
class BaseEstimator(sklearn.BaseEstimator, evaluable.Evaluable,
                    trainable.Trainable):
  """Abstract BaseEstimator class to train and evaluate TensorFlow models.

  THIS CLASS IS DEPRECATED. See
  [contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
  for general migration instructions.

  Users should not instantiate or subclass this class. Instead, use an
  `Estimator`.
  """
  # Python 2-style ABC declaration; under Python 3 this attribute is ignored.
  __metaclass__ = abc.ABCMeta
  # Note that for Google users, this is overridden with
  # learn_runner.EstimatorConfig.
  # TODO(wicke): Remove this once launcher takes over config functionality
  _Config = run_config.RunConfig  # pylint: disable=invalid-name
  @deprecated(None, 'Please replace uses of any Estimator from tf.contrib.learn'
              ' with an Estimator from tf.estimator.*')
  def __init__(self, model_dir=None, config=None):
    """Initializes a BaseEstimator instance.

    Args:
      model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into a estimator to
        continue training a previously saved model. If `None`, the model_dir in
        `config` will be used if set. If both are set, they must be same.
      config: A RunConfig instance.
    """
    # Create a run configuration.
    if config is None:
      self._config = BaseEstimator._Config()
      logging.info('Using default config.')
    else:
      self._config = config
    # Honor an explicit session_config; otherwise default to soft device
    # placement so ops can fall back to a supported device.
    if self._config.session_config is None:
      self._session_config = config_pb2.ConfigProto(allow_soft_placement=True)
    else:
      self._session_config = self._config.session_config
    # Model directory: when given both in the constructor and in the config,
    # the two values must agree.
    if (model_dir is not None) and (self._config.model_dir is not None):
      if model_dir != self._config.model_dir:
        # TODO(b/9965722): remove this suppression after it is no longer
        # necessary.
        # pylint: disable=g-doc-exception
        raise ValueError(
            'model_dir are set both in constructor and RunConfig, but with '
            "different values. In constructor: '{}', in RunConfig: "
            "'{}' ".format(model_dir, self._config.model_dir))
        # pylint: enable=g-doc-exception
    self._model_dir = model_dir or self._config.model_dir
    if self._model_dir is None:
      # No directory supplied anywhere: train into a fresh temp directory.
      self._model_dir = tempfile.mkdtemp()
      logging.warning('Using temporary folder as model directory: %s',
                      self._model_dir)
    if self._config.model_dir is None:
      self._config = self._config.replace(model_dir=self._model_dir)
    logging.info('Using config: %s', str(vars(self._config)))
    # Set device function depending if there are replicas or not.
    self._device_fn = _get_replica_device_setter(self._config)
    # Features and labels TensorSignature objects, captured on first fit()
    # input and used to validate later calls (see _check_inputs).
    # TODO(wicke): Rename these to something more descriptive
    self._features_info = None
    self._labels_info = None
    self._graph = None
  @property
  def config(self):
    """Returns a deep copy of this estimator's `RunConfig`.

    Copied so callers cannot mutate the estimator's internal configuration.
    """
    # TODO(wicke): make RunConfig immutable, and then return it without a copy.
    return copy.deepcopy(self._config)
  @property
  def model_fn(self):
    """Returns the model_fn which is bound to self.params.

    Returns:
      The model_fn with the following signature:
        `def model_fn(features, labels, mode, config)`
    """
    # Closure over `self`: params/model_dir resolution stays inside
    # _call_model_fn, so callers only supply (features, labels, mode, config).
    def public_model_fn(features, labels, mode, config):
      return self._call_model_fn(features, labels, mode, config=config)
    return public_model_fn
  @deprecated_args(SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS,
                   ('x', None), ('y', None), ('batch_size', None))
  def fit(self,
          x=None,
          y=None,
          input_fn=None,
          steps=None,
          batch_size=None,
          monitors=None,
          max_steps=None):
    # pylint: disable=g-doc-args,g-doc-return-or-yield
    """See `Trainable`.

    Raises:
      ValueError: If `x` or `y` are not `None` while `input_fn` is not `None`.
      ValueError: If both `steps` and `max_steps` are not `None`.
    """
    if (steps is not None) and (max_steps is not None):
      raise ValueError('Can not provide both steps and max_steps.')
    _verify_input_args(x, y, input_fn, None, batch_size)
    if x is not None:
      # Array-style inputs go through the scikit-learn compatibility wrapper.
      SKCompat(self).fit(x, y, batch_size, steps, max_steps, monitors)
      return self
    if max_steps is not None:
      # Skip training entirely if a checkpoint already reached max_steps.
      # Best-effort: any failure to read the global step falls through to
      # normal training.
      try:
        start_step = load_variable(self._model_dir, ops.GraphKeys.GLOBAL_STEP)
        if max_steps <= start_step:
          logging.info('Skipping training since max_steps has already saved.')
          return self
      except:  # pylint: disable=bare-except
        pass
    # Convert legacy monitors into SessionRunHooks, and stop at the step
    # budget when one was given.
    hooks = monitor_lib.replace_monitors_with_hooks(monitors, self)
    if steps is not None or max_steps is not None:
      hooks.append(basic_session_run_hooks.StopAtStepHook(steps, max_steps))
    loss = self._train_model(input_fn=input_fn, hooks=hooks)
    logging.info('Loss for final step: %s.', loss)
    return self
  @deprecated_args(SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS,
                   ('x', None), ('y', None), ('batch_size', None))
  def partial_fit(self,
                  x=None,
                  y=None,
                  input_fn=None,
                  steps=1,
                  batch_size=None,
                  monitors=None):
    """Incremental fit on a batch of samples.

    This method is expected to be called several times consecutively
    on different or the same chunks of the dataset. This either can
    implement iterative training or out-of-core/online training.

    This is especially useful when the whole dataset is too big to
    fit in memory at the same time. Or when model is taking long time
    to converge, and you want to split up training into subparts.

    Args:
      x: Matrix of shape [n_samples, n_features...]. Can be iterator that
        returns arrays of features. The training input samples for fitting the
        model. If set, `input_fn` must be `None`.
      y: Vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
        iterator that returns array of labels. The training label values
        (class labels in classification, real numbers in regression). If set,
        `input_fn` must be `None`.
      input_fn: Input function. If set, `x`, `y`, and `batch_size` must be
        `None`.
      steps: Number of steps for which to train model. If `None`, train forever.
      batch_size: minibatch size to use on the input, defaults to first
        dimension of `x`. Must be `None` if `input_fn` is provided.
      monitors: List of `BaseMonitor` subclass instances. Used for callbacks
        inside the training loop.

    Returns:
      `self`, for chaining.

    Raises:
      ValueError: If at least one of `x` and `y` is provided, and `input_fn` is
        provided.
    """
    logging.warning('The current implementation of partial_fit is not optimized'
                    ' for use in a loop. Consider using fit() instead.')
    # partial_fit is just fit() with `steps` defaulting to 1.
    return self.fit(
        x=x,
        y=y,
        input_fn=input_fn,
        steps=steps,
        batch_size=batch_size,
        monitors=monitors)
  @deprecated_args(SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS,
                   ('x', None), ('y', None), ('batch_size', None))
  def evaluate(self,
               x=None,
               y=None,
               input_fn=None,
               feed_fn=None,
               batch_size=None,
               steps=None,
               metrics=None,
               name=None,
               checkpoint_path=None,
               hooks=None,
               log_progress=True):
    # pylint: disable=g-doc-args,g-doc-return-or-yield
    """See `Evaluable`.

    Raises:
      ValueError: If at least one of `x` or `y` is provided, and at least one of
        `input_fn` or `feed_fn` is provided.
        Or if `metrics` is not `None` or `dict`.
    """
    _verify_input_args(x, y, input_fn, feed_fn, batch_size)
    if x is not None:
      # Array-style inputs go through the scikit-learn compatibility wrapper.
      return SKCompat(self).score(x, y, batch_size, steps, metrics, name)
    if metrics is not None and not isinstance(metrics, dict):
      raise ValueError('Metrics argument should be None or dict. '
                       'Got %s.' % metrics)
    eval_results, global_step = self._evaluate_model(
        input_fn=input_fn,
        feed_fn=feed_fn,
        steps=steps,
        metrics=metrics,
        name=name,
        checkpoint_path=checkpoint_path,
        hooks=hooks,
        log_progress=log_progress)
    if eval_results is not None:
      # Report the step the evaluation ran at alongside the metric values.
      eval_results.update({'global_step': global_step})
    return eval_results
  @deprecated_args(SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS,
                   ('x', None), ('batch_size', None), ('as_iterable', True))
  def predict(self,
              x=None,
              input_fn=None,
              batch_size=None,
              outputs=None,
              as_iterable=True,
              iterate_batches=False):
    """Returns predictions for given features.

    Args:
      x: Matrix of shape [n_samples, n_features...]. Can be iterator that
        returns arrays of features. The training input samples for fitting the
        model. If set, `input_fn` must be `None`.
      input_fn: Input function. If set, `x` and 'batch_size' must be `None`.
      batch_size: Override default batch size. If set, 'input_fn' must be
        'None'.
      outputs: list of `str`, name of the output to predict.
        If `None`, returns all.
      as_iterable: If True, return an iterable which keeps yielding predictions
        for each example until inputs are exhausted. Note: The inputs must
        terminate if you want the iterable to terminate (e.g. be sure to pass
        num_epochs=1 if you are using something like read_batch_features).
      iterate_batches: If True, yield the whole batch at once instead of
        decomposing the batch into individual samples. Only relevant when
        as_iterable is True.

    Returns:
      A numpy array of predicted classes or regression values if the
      constructor's `model_fn` returns a `Tensor` for `predictions` or a `dict`
      of numpy arrays if `model_fn` returns a `dict`. Returns an iterable of
      predictions if as_iterable is True.

    Raises:
      ValueError: If x and input_fn are both provided or both `None`.
    """
    _verify_input_args(x, None, input_fn, None, batch_size)
    if x is not None and not as_iterable:
      # Materialized numpy results go through the scikit-learn wrapper.
      return SKCompat(self).predict(x, batch_size)
    # Wrap `x` (when given) into an input_fn/feed_fn pair, then run inference.
    input_fn, feed_fn = _get_input_fn(x, None, input_fn, None, batch_size)
    return self._infer_model(
        input_fn=input_fn,
        feed_fn=feed_fn,
        outputs=outputs,
        as_iterable=as_iterable,
        iterate_batches=iterate_batches)
  def get_variable_value(self, name):
    """Returns value of the variable given by name.

    Args:
      name: string, name of the tensor.

    Returns:
      Numpy array - value of the tensor.
    """
    # Read directly from the saved model directory; no active session needed.
    return load_variable(self.model_dir, name)
  def get_variable_names(self):
    """Returns list of all variable names in this model.

    Returns:
      List of names.
    """
    # list_variables yields (name, shape) pairs; only the names are kept.
    return [name for name, _ in list_variables(self.model_dir)]
  @property
  def model_dir(self):
    """Directory where model parameters, graph, etc. are saved."""
    return self._model_dir
  @deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
  def export(
      self,
      export_dir,
      input_fn=export._default_input_fn,  # pylint: disable=protected-access
      input_feature_key=None,
      use_deprecated_input_fn=True,
      signature_fn=None,
      prediction_key=None,
      default_batch_size=1,
      exports_to_keep=None,
      checkpoint_path=None):
    """Exports inference graph into given dir.

    Args:
      export_dir: A string containing a directory to write the exported graph
        and checkpoints.
      input_fn: If `use_deprecated_input_fn` is true, then a function that given
        `Tensor` of `Example` strings, parses it into features that are then
        passed to the model. Otherwise, a function that takes no argument and
        returns a tuple of (features, labels), where features is a dict of
        string key to `Tensor` and labels is a `Tensor` that's currently not
        used (and so can be `None`).
      input_feature_key: Only used if `use_deprecated_input_fn` is false. String
        key into the features dict returned by `input_fn` that corresponds to a
        the raw `Example` strings `Tensor` that the exported model will take as
        input. Can only be `None` if you're using a custom `signature_fn` that
        does not use the first arg (examples).
      use_deprecated_input_fn: Determines the signature format of `input_fn`.
      signature_fn: Function that returns a default signature and a named
        signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s
        for features and `Tensor` or `dict` of `Tensor`s for predictions.
      prediction_key: The key for a tensor in the `predictions` dict (output
        from the `model_fn`) to use as the `predictions` input to the
        `signature_fn`. Optional. If `None`, predictions will pass to
        `signature_fn` without filtering.
      default_batch_size: Default batch size of the `Example` placeholder.
      exports_to_keep: Number of exports to keep.
      checkpoint_path: the checkpoint path of the model to be exported. If it is
        `None` (which is default), will use the latest checkpoint in
        export_dir.

    Returns:
      The string path to the exported directory. NB: this functionality was
      added ca. 2016/09/25; clients that depend on the return value may need
      to handle the case where this function returns None because subclasses
      are not returning a value.
    """
    # Thin wrapper: all work is delegated to the deprecated contrib export
    # utility, with this estimator passed through.
    # pylint: disable=protected-access
    return export._export_estimator(
        estimator=self,
        export_dir=export_dir,
        signature_fn=signature_fn,
        prediction_key=prediction_key,
        input_fn=input_fn,
        input_feature_key=input_feature_key,
        use_deprecated_input_fn=use_deprecated_input_fn,
        default_batch_size=default_batch_size,
        exports_to_keep=exports_to_keep,
        checkpoint_path=checkpoint_path)
@abc.abstractproperty
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
@abc.abstractproperty
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
  def _get_eval_ops(self, features, labels, metrics):
    """Method that builds model graph and returns evaluation ops.

    Expected to be overridden by sub-classes that require custom support.

    Args:
      features: `Tensor` or `dict` of `Tensor` objects.
      labels: `Tensor` or `dict` of `Tensor` objects.
      metrics: Dict of metrics to run. If None, the default metric functions
        are used; if {}, no metrics are used. Otherwise, `metrics` should map
        friendly names for the metric to a `MetricSpec` object defining which
        model outputs to evaluate against which labels with which metric
        function. Metric ops should support streaming, e.g., returning
        update_op and value tensors. See more details in
        `../../../../metrics/python/metrics/ops/streaming_metrics.py` and
        `../metric_spec.py`.

    Returns:
      A `ModelFnOps` object.
    """
    # NOTE(review): raises rather than being declared abstract — presumably so
    # subclasses that never evaluate are not forced to implement it; confirm
    # before relying on that.
    raise NotImplementedError('_get_eval_ops not implemented in BaseEstimator')
  @deprecated(
      '2016-09-23',
      'The signature of the input_fn accepted by export is changing to be '
      'consistent with what\'s used by tf.Learn Estimator\'s train/evaluate, '
      'which makes this function useless. This will be removed after the '
      'deprecation date.')
  def _get_feature_ops_from_example(self, examples_batch):
    """Returns feature parser for given example batch using features info.

    This function requires `fit()` has been called, because it relies on the
    feature signatures captured there (`self._features_info`).

    Args:
      examples_batch: batch of tf.Example

    Returns:
      features: `Tensor` or `dict` of `Tensor` objects.

    Raises:
      ValueError: If `_features_info` attribute is not available (usually
        because `fit()` has not been called).
    """
    if self._features_info is None:
      raise ValueError('Features information missing, was fit() ever called?')
    return tensor_signature.create_example_parser_from_signatures(
        self._features_info, examples_batch)
  def _check_inputs(self, features, labels):
    """Validates inputs against the signatures captured on the first call.

    The first call records `TensorSignature`s for `features` (and `labels`,
    when present) in `self._features_info`/`self._labels_info`; later calls
    must supply compatible tensors.

    Raises:
      ValueError: if `features` or `labels` are incompatible with the
        previously recorded signatures.
    """
    if self._features_info is not None:
      logging.debug('Given features: %s, required signatures: %s.',
                    str(features), str(self._features_info))
      if not tensor_signature.tensors_compatible(features, self._features_info):
        raise ValueError('Features are incompatible with given information. '
                         'Given features: %s, required signatures: %s.' %
                         (str(features), str(self._features_info)))
    else:
      # First call: capture the feature signatures.
      self._features_info = tensor_signature.create_signatures(features)
      logging.debug('Setting feature info to %s.', str(self._features_info))
    if labels is not None:
      if self._labels_info is not None:
        logging.debug('Given labels: %s, required signatures: %s.', str(labels),
                      str(self._labels_info))
        if not tensor_signature.tensors_compatible(labels, self._labels_info):
          raise ValueError('Labels are incompatible with given information. '
                           'Given labels: %s, required signatures: %s.' %
                           (str(labels), str(self._labels_info)))
      else:
        # First call with labels: capture the label signatures.
        self._labels_info = tensor_signature.create_signatures(labels)
        logging.debug('Setting labels info to %s', str(self._labels_info))
def _extract_metric_update_ops(self, eval_dict):
"""Separate update operations from metric value operations."""
update_ops = []
value_ops = {}
for name, metric_ops in six.iteritems(eval_dict):
if isinstance(metric_ops, (list, tuple)):
if len(metric_ops) == 2:
value_ops[name] = metric_ops[0]
update_ops.append(metric_ops[1])
else:
logging.warning(
'Ignoring metric {}. It returned a list|tuple with len {}, '
'expected 2'.format(name, len(metric_ops)))
value_ops[name] = metric_ops
else:
value_ops[name] = metric_ops
if update_ops:
update_ops = control_flow_ops.group(*update_ops)
else:
update_ops = None
return update_ops, value_ops
  def _evaluate_model(self,
                      input_fn,
                      steps,
                      feed_fn=None,
                      metrics=None,
                      name='',
                      checkpoint_path=None,
                      hooks=None,
                      log_progress=True):
    """Builds the eval graph and evaluates it against a checkpoint.

    Restores `checkpoint_path` (or the latest checkpoint in the model dir),
    runs the metric update ops for `steps` batches (or until the input is
    exhausted), writes the results as a summary under the model dir, and
    returns `(eval_results, current_global_step)`.
    """
    # TODO(wicke): Remove this once Model and associated code are gone.
    if (hasattr(self._config, 'execution_mode') and
        self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset')):
      return None, None
    # Check that model has been trained (if nothing has been set explicitly).
    if not checkpoint_path:
      latest_path = saver.latest_checkpoint(self._model_dir)
      if not latest_path:
        raise NotFittedError(
            "Couldn't find trained model at %s." % self._model_dir)
      checkpoint_path = latest_path
    # Setup output directory: 'eval' or 'eval_<name>' under the model dir.
    eval_dir = os.path.join(self._model_dir, 'eval'
                            if not name else 'eval_' + name)
    with ops.Graph().as_default() as g:
      random_seed.set_random_seed(self._config.tf_random_seed)
      global_step = training_util.create_global_step(g)
      features, labels = input_fn()
      self._check_inputs(features, labels)
      model_fn_results = self._get_eval_ops(features, labels, metrics)
      eval_dict = model_fn_results.eval_metric_ops
      update_op, eval_dict = self._extract_metric_update_ops(eval_dict)
      # We need to copy the hook array as we modify it, thus [:].
      hooks = hooks[:] if hooks else []
      if feed_fn:
        hooks.append(basic_session_run_hooks.FeedFnHook(feed_fn))
      if steps == 0:
        logging.warning('evaluation steps are 0. If `input_fn` does not raise '
                        '`OutOfRangeError`, the evaluation will never stop. '
                        'Use steps=None if intended.')
      if steps:
        hooks.append(
            evaluation.StopAfterNEvalsHook(steps, log_progress=log_progress))
      # Report the global step alongside the metrics, under a key that cannot
      # collide with a user-provided metric name.
      global_step_key = 'global_step'
      while global_step_key in eval_dict:
        global_step_key = '_' + global_step_key
      eval_dict[global_step_key] = global_step
      eval_results = evaluation.evaluate_once(
          checkpoint_path=checkpoint_path,
          master=self._config.evaluation_master,
          scaffold=model_fn_results.scaffold,
          eval_ops=update_op,
          final_ops=eval_dict,
          hooks=hooks,
          config=self._session_config)
      current_global_step = eval_results[global_step_key]
      _write_dict_to_summary(eval_dir, eval_results, current_global_step)
      return eval_results, current_global_step
def _get_features_from_input_fn(self, input_fn):
result = input_fn()
if isinstance(result, (list, tuple)):
return result[0]
return result
  def _infer_model(self,
                   input_fn,
                   feed_fn=None,
                   outputs=None,
                   as_iterable=True,
                   iterate_batches=False):
    """Restores the latest checkpoint and runs the prediction ops.

    Returns a single batch of predictions when `as_iterable` is False, or a
    generator yielding predictions until the input is exhausted otherwise.
    """
    # Check that model has been trained.
    checkpoint_path = saver.latest_checkpoint(self._model_dir)
    if not checkpoint_path:
      raise NotFittedError(
          "Couldn't find trained model at %s." % self._model_dir)
    with ops.Graph().as_default() as g:
      random_seed.set_random_seed(self._config.tf_random_seed)
      training_util.create_global_step(g)
      features = self._get_features_from_input_fn(input_fn)
      infer_ops = self._get_predict_ops(features)
      # Restrict fetches to the requested output names (if any).
      predictions = self._filter_predictions(infer_ops.predictions, outputs)
      mon_sess = monitored_session.MonitoredSession(
          session_creator=monitored_session.ChiefSessionCreator(
              checkpoint_filename_with_path=checkpoint_path,
              scaffold=infer_ops.scaffold,
              config=self._session_config))
      if not as_iterable:
        # Single fetch: run once; the with-block closes the session.
        with mon_sess:
          if not mon_sess.should_stop():
            return mon_sess.run(predictions, feed_fn() if feed_fn else None)
      else:
        # The generator takes ownership of mon_sess and closes it when done.
        return self._predict_generator(mon_sess, predictions, feed_fn,
                                       iterate_batches)
  def _predict_generator(self, mon_sess, predictions, feed_fn, iterate_batches):
    """Yields predictions from the running session until input is exhausted."""
    with mon_sess:
      while not mon_sess.should_stop():
        preds = mon_sess.run(predictions, feed_fn() if feed_fn else None)
        if iterate_batches:
          # Emit the whole batch at once.
          yield preds
        elif not isinstance(predictions, dict):
          # Single-tensor predictions: yield one element per example.
          for pred in preds:
            yield pred
        else:
          # Dict predictions: decompose the batch into one dict per example,
          # taking the batch length from the first output.
          first_tensor = list(preds.values())[0]
          if isinstance(first_tensor, sparse_tensor.SparseTensorValue):
            batch_length = first_tensor.dense_shape[0]
          else:
            batch_length = first_tensor.shape[0]
          for i in range(batch_length):
            yield {key: value[i] for key, value in six.iteritems(preds)}
        if self._is_input_constant(feed_fn, mon_sess.graph):
          # A constant input would never raise OutOfRangeError; stop after
          # the first batch to avoid looping forever.
          return
def _is_input_constant(self, feed_fn, graph):
# If there are no queue_runners, the input `predictions` is a
# constant, and we should stop after the first epoch. If,
# instead, there are queue_runners, eventually they should throw
# an `OutOfRangeError`.
if graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS):
return False
# data_feeder uses feed_fn to generate `OutOfRangeError`.
if feed_fn is not None:
return False
return True
def _filter_predictions(self, predictions, outputs):
if not outputs:
return predictions
if not isinstance(predictions, dict):
raise ValueError(
'outputs argument is not valid in case of non-dict predictions.')
existing_keys = predictions.keys()
predictions = {
key: value
for key, value in six.iteritems(predictions)
if key in outputs
}
if not predictions:
raise ValueError('Expected to run at least one output from %s, '
'provided %s.' % (existing_keys, outputs))
return predictions
  def _train_model(self, input_fn, hooks):
    """Builds the train graph from `input_fn` and runs it until stopped.

    Returns:
      The loss from the final training step, or `None` if no step ran.
    """
    all_hooks = []
    self._graph = ops.Graph()
    with self._graph.as_default() as g, g.device(self._device_fn):
      random_seed.set_random_seed(self._config.tf_random_seed)
      global_step = training_util.create_global_step(g)
      features, labels = input_fn()
      self._check_inputs(features, labels)
      training_util._get_or_create_global_step_read()  # pylint: disable=protected-access
      model_fn_ops = self._get_train_ops(features, labels)
      ops.add_to_collection(ops.GraphKeys.LOSSES, model_fn_ops.loss)
      all_hooks.extend(hooks)
      # Always monitor for NaN loss, and log loss/step every 100 iterations.
      all_hooks.extend([
          basic_session_run_hooks.NanTensorHook(model_fn_ops.loss),
          basic_session_run_hooks.LoggingTensorHook(
              {
                  'loss': model_fn_ops.loss,
                  'step': global_step
              },
              every_n_iter=100)
      ])
      # Install a default sharded Saver unless the scaffold or the SAVERS
      # collection already provides one.
      scaffold = model_fn_ops.scaffold or monitored_session.Scaffold()
      if not (scaffold.saver or ops.get_collection(ops.GraphKeys.SAVERS)):
        ops.add_to_collection(
            ops.GraphKeys.SAVERS,
            saver.Saver(
                sharded=True,
                max_to_keep=self._config.keep_checkpoint_max,
                keep_checkpoint_every_n_hours=(
                    self._config.keep_checkpoint_every_n_hours),
                defer_build=True,
                save_relative_paths=True))
      chief_hooks = []
      if (self._config.save_checkpoints_secs or
          self._config.save_checkpoints_steps):
        # Only add a CheckpointSaverHook if none of the hook lists already
        # contains one.
        saver_hook_exists = any([
            isinstance(h, basic_session_run_hooks.CheckpointSaverHook)
            for h in (all_hooks + model_fn_ops.training_hooks + chief_hooks +
                      model_fn_ops.training_chief_hooks)
        ])
        if not saver_hook_exists:
          chief_hooks = [
              basic_session_run_hooks.CheckpointSaverHook(
                  self._model_dir,
                  save_secs=self._config.save_checkpoints_secs,
                  save_steps=self._config.save_checkpoints_steps,
                  scaffold=scaffold)
          ]
      with monitored_session.MonitoredTrainingSession(
          master=self._config.master,
          is_chief=self._config.is_chief,
          checkpoint_dir=self._model_dir,
          scaffold=scaffold,
          hooks=all_hooks + model_fn_ops.training_hooks,
          chief_only_hooks=chief_hooks + model_fn_ops.training_chief_hooks,
          save_checkpoint_secs=0,  # Saving is handled by a hook.
          save_summaries_steps=self._config.save_summary_steps,
          config=self._session_config) as mon_sess:
        loss = None
        # Run until a hook (StopAtStepHook, NanTensorHook, ...) stops us.
        while not mon_sess.should_stop():
          _, loss = mon_sess.run([model_fn_ops.train_op, model_fn_ops.loss])
      return loss
def _identity_feature_engineering_fn(features, labels):
  """Default feature_engineering_fn: returns `features`/`labels` unchanged."""
  return features, labels
class Estimator(BaseEstimator):
"""Estimator class is the basic TensorFlow model trainer/evaluator.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
"""
  def __init__(self,
               model_fn=None,
               model_dir=None,
               config=None,
               params=None,
               feature_engineering_fn=None):
    """Constructs an `Estimator` instance.

    Args:
      model_fn: Model function. Follows the signature:
        * Args:
          * `features`: single `Tensor` or `dict` of `Tensor`s
                 (depending on data passed to `fit`),
          * `labels`: `Tensor` or `dict` of `Tensor`s (for multi-head
                 models). If mode is `ModeKeys.INFER`, `labels=None` will be
                 passed. If the `model_fn`'s signature does not accept
                 `mode`, the `model_fn` must still be able to handle
                 `labels=None`.
          * `mode`: Optional. Specifies if this training, evaluation or
                 prediction. See `ModeKeys`.
          * `params`: Optional `dict` of hyperparameters.  Will receive what
                 is passed to Estimator in `params` parameter. This allows
                 to configure Estimators from hyper parameter tuning.
          * `config`: Optional configuration object. Will receive what is passed
                 to Estimator in `config` parameter, or the default `config`.
                 Allows updating things in your model_fn based on configuration
                 such as `num_ps_replicas`.
          * `model_dir`: Optional directory where model parameters, graph etc
                 are saved. Will receive what is passed to Estimator in
                 `model_dir` parameter, or the default `model_dir`. Allows
                 updating things in your model_fn that expect model_dir, such as
                 training hooks.
        * Returns:
          `ModelFnOps`
        Also supports a legacy signature which returns tuple of:
          * predictions: `Tensor`, `SparseTensor` or dictionary of same.
              Can also be any type that is convertible to a `Tensor` or
              `SparseTensor`, or dictionary of same.
          * loss: Scalar loss `Tensor`.
          * train_op: Training update `Tensor` or `Operation`.
        Supports next three signatures for the function:
          * `(features, labels) -> (predictions, loss, train_op)`
          * `(features, labels, mode) -> (predictions, loss, train_op)`
          * `(features, labels, mode, params) -> (predictions, loss, train_op)`
          * `(features, labels, mode, params, config) ->
             (predictions, loss, train_op)`
          * `(features, labels, mode, params, config, model_dir) ->
             (predictions, loss, train_op)`
      model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into a estimator to
        continue training a previously saved model.
      config: Configuration object.
      params: `dict` of hyper parameters that will be passed into `model_fn`.
              Keys are names of parameters, values are basic python types.
      feature_engineering_fn: Feature engineering function. Takes features and
                              labels which are the output of `input_fn` and
                              returns features and labels which will be fed
                              into `model_fn`. Please check `model_fn` for
                              a definition of features and labels.

    Raises:
      ValueError: parameters of `model_fn` don't match `params`.
    """
    super(Estimator, self).__init__(model_dir=model_dir, config=config)
    if model_fn is not None:
      # Check number of arguments of the given function matches requirements.
      model_fn_args = _model_fn_args(model_fn)
      # `params` given but not accepted is an error; accepted but not given
      # is merely suspicious, so only a warning is logged.
      if params is not None and 'params' not in model_fn_args:
        raise ValueError('Estimator\'s model_fn (%s) does not have a params '
                         'argument, but params (%s) were passed to the '
                         'Estimator\'s constructor.' % (model_fn, params))
      if params is None and 'params' in model_fn_args:
        logging.warning('Estimator\'s model_fn (%s) includes params '
                        'argument, but params are not passed to Estimator.',
                        model_fn)
    self._model_fn = model_fn
    self.params = params
    # Fall back to the identity pass-through when no feature engineering
    # function is supplied.
    self._feature_engineering_fn = (
        feature_engineering_fn or _identity_feature_engineering_fn)
  def _call_model_fn(self, features, labels, mode, metrics=None, config=None):
    """Calls model function with support of 2, 3 or 4 arguments.

    Args:
      features: features dict.
      labels: labels dict.
      mode: ModeKeys
      metrics: Dict of metrics.
      config: RunConfig.

    Returns:
      A `ModelFnOps` object. If model_fn returns a tuple, wraps them up in a
      `ModelFnOps` object.

    Raises:
      ValueError: if model_fn returns invalid objects.
    """
    features, labels = self._feature_engineering_fn(features, labels)
    model_fn_args = _model_fn_args(self._model_fn)
    # Only forward the optional arguments that model_fn's signature declares.
    kwargs = {}
    if 'mode' in model_fn_args:
      kwargs['mode'] = mode
    if 'params' in model_fn_args:
      kwargs['params'] = self.params
    if 'config' in model_fn_args:
      if config:
        kwargs['config'] = config
      else:
        kwargs['config'] = self.config
    if 'model_dir' in model_fn_args:
      kwargs['model_dir'] = self.model_dir
    model_fn_results = self._model_fn(features, labels, **kwargs)
    if isinstance(model_fn_results, model_fn_lib.ModelFnOps):
      model_fn_ops = model_fn_results
    else:
      # Here model_fn_results should be a tuple with 3 elements:
      # (predictions, loss, train_op) — the legacy return signature.
      if len(model_fn_results) != 3:
        raise ValueError('Unrecognized value returned by model_fn, '
                         'please return ModelFnOps.')
      model_fn_ops = model_fn_lib.ModelFnOps(
          mode=mode,
          predictions=model_fn_results[0],
          loss=model_fn_results[1],
          train_op=model_fn_results[2])
    # Custom metrics should overwrite defaults.
    if metrics:
      model_fn_ops.eval_metric_ops.update(
          _make_metrics_ops(metrics, features, labels,
                            model_fn_ops.predictions))
    return model_fn_ops
  def _get_train_ops(self, features, labels):
    """Method that builds model graph and returns trainer ops.

    Expected to be overridden by sub-classes that require custom support.
    This implementation uses `model_fn` passed as parameter to constructor to
    build model.

    Args:
      features: `Tensor` or `dict` of `Tensor` objects.
      labels: `Tensor` or `dict` of `Tensor` objects.

    Returns:
      `ModelFnOps` object.
    """
    # Thin wrapper: run the user-supplied model_fn in TRAIN mode.
    return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.TRAIN)
  def _get_eval_ops(self, features, labels, metrics):
    """Method that builds model graph and returns evaluation ops.

    Expected to be overridden by sub-classes that require custom support.
    This implementation uses `model_fn` passed as parameter to constructor to
    build model.

    Args:
      features: `Tensor` or `dict` of `Tensor` objects.
      labels: `Tensor` or `dict` of `Tensor` objects.
      metrics: Dict of metrics to run. If None, the default metric functions
        are used; if {}, no metrics are used. Otherwise, `metrics` should map
        friendly names for the metric to a `MetricSpec` object defining which
        model outputs to evaluate against which labels with which metric
        function. Metric ops should support streaming, e.g., returning
        update_op and value tensors. See more details in
        `../../../../metrics/python/metrics/ops/streaming_metrics.py` and
        `../metric_spec.py`.

    Returns:
      `ModelFnOps` object.

    Raises:
      ValueError: if `metrics` don't match `labels`.
    """
    model_fn_ops = self._call_model_fn(features, labels,
                                       model_fn_lib.ModeKeys.EVAL, metrics)
    # Always report loss as a streaming-mean metric, unless model_fn already
    # supplied one under the same key.
    if metric_key.MetricKey.LOSS not in model_fn_ops.eval_metric_ops:
      model_fn_ops.eval_metric_ops[metric_key.MetricKey.LOSS] = (
          metrics_lib.mean(model_fn_ops.loss))
    return model_fn_ops
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
labels = tensor_signature.create_placeholders_from_signatures(
self._labels_info)
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.INFER)
  def export_savedmodel(self,
                        export_dir_base,
                        serving_input_fn,
                        default_output_alternative_key=None,
                        assets_extra=None,
                        as_text=False,
                        checkpoint_path=None,
                        graph_rewrite_specs=(GraphRewriteSpec(
                            (tag_constants.SERVING,), ()),),
                        strip_default_attrs=False):
    # pylint: disable=line-too-long
    """Exports inference graph as a SavedModel into given dir.

    Args:
      export_dir_base: A string containing a directory to write the exported
        graph and checkpoints.
      serving_input_fn: A function that takes no argument and
        returns an `InputFnOps`.
      default_output_alternative_key: the name of the head to serve when none is
        specified. Not needed for single-headed models.
      assets_extra: A dict specifying how to populate the assets.extra directory
        within the exported SavedModel. Each key should give the destination
        path (including the filename) relative to the assets.extra directory.
        The corresponding value gives the full path of the source file to be
        copied. For example, the simple case of copying a single file without
        renaming it is specified as
        `{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.
      as_text: whether to write the SavedModel proto in text format.
      checkpoint_path: The checkpoint path to export. If None (the default),
        the most recent checkpoint found within the model directory is chosen.
      graph_rewrite_specs: an iterable of `GraphRewriteSpec`. Each element will
        produce a separate MetaGraphDef within the exported SavedModel, tagged
        and rewritten as specified. Defaults to a single entry using the
        default serving tag ("serve") and no rewriting.
      strip_default_attrs: Boolean. If `True`, default-valued attributes will be
        removed from the NodeDefs. For a detailed guide, see
        [Stripping Default-Valued
        Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).

    Returns:
      The string path to the exported directory.

    Raises:
      ValueError: if an unrecognized export_type is requested.
    """
    # pylint: enable=line-too-long
    if serving_input_fn is None:
      raise ValueError('serving_input_fn must be defined.')
    if not checkpoint_path:
      # Locate the latest checkpoint
      checkpoint_path = saver.latest_checkpoint(self._model_dir)
    if not checkpoint_path:
      raise NotFittedError(
          "Couldn't find trained model at %s." % self._model_dir)
    export_dir = saved_model_export_utils.get_timestamped_export_dir(
        export_dir_base)
    # We'll write the SavedModel to a temporary directory and then atomically
    # rename it at the end. This helps to avoid corrupt / incomplete outputs,
    # which could otherwise occur if the job is preempted or otherwise fails
    # in the middle of SavedModel creation.
    temp_export_dir = saved_model_export_utils.get_temp_export_dir(export_dir)
    builder = saved_model_builder.SavedModelBuilder(temp_export_dir)
    # Build the base graph
    with ops.Graph().as_default() as g:
      training_util.create_global_step(g)
      # Call the serving_input_fn and collect the input alternatives.
      input_ops = serving_input_fn()
      input_alternatives, features = (
          saved_model_export_utils.get_input_alternatives(input_ops))
      # TODO(b/34388557) This is a stopgap, pending recording model provenance.
      # Record which features are expected at serving time. It is assumed that
      # these are the features that were used in training.
      for feature_key in input_ops.features.keys():
        ops.add_to_collection(
            constants.COLLECTION_DEF_KEY_FOR_INPUT_FEATURE_KEYS, feature_key)
      # Call the model_fn and collect the output alternatives.
      # NOTE(review): labels are passed as None here since INFER mode has no
      # labels to feed.
      model_fn_ops = self._call_model_fn(features, None,
                                         model_fn_lib.ModeKeys.INFER)
      output_alternatives, actual_default_output_alternative_key = (
          saved_model_export_utils.get_output_alternatives(
              model_fn_ops, default_output_alternative_key))
      init_op = control_flow_ops.group(variables.local_variables_initializer(),
                                       resources.initialize_resources(
                                           resources.shared_resources()),
                                       lookup_ops.tables_initializer())
      # Build the SignatureDefs from all pairs of input and output alternatives
      signature_def_map = saved_model_export_utils.build_all_signature_defs(
          input_alternatives, output_alternatives,
          actual_default_output_alternative_key)
      # Export the first MetaGraphDef with variables, assets etc.
      with tf_session.Session('') as session:
        # pylint: disable=protected-access
        saveables = variables._all_saveable_objects()
        # pylint: enable=protected-access
        # Prefer a saver supplied by the model's scaffold; otherwise build a
        # sharded saver over all saveable objects.
        if (model_fn_ops.scaffold is not None and
            model_fn_ops.scaffold.saver is not None):
          saver_for_restore = model_fn_ops.scaffold.saver
        elif saveables:
          saver_for_restore = saver.Saver(saveables, sharded=True)
        saver_for_restore.restore(session, checkpoint_path)
        # Perform the export
        if not graph_rewrite_specs or graph_rewrite_specs[0].transforms:
          raise ValueError('The first element of graph_rewrite_specs '
                           'must specify no transforms.')
        untransformed_tags = graph_rewrite_specs[0].tags
        # TODO(soergel): switch to main_op or otherwise update when dust settles
        builder.add_meta_graph_and_variables(
            session,
            untransformed_tags,
            signature_def_map=signature_def_map,
            assets_collection=ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS),
            legacy_init_op=init_op,
            strip_default_attrs=strip_default_attrs)
    # pylint: disable=protected-access
    base_meta_graph_def = builder._saved_model.meta_graphs[0]
    # pylint: enable=protected-access
    if graph_rewrite_specs[1:]:
      # Prepare the input_names and output_names needed for the
      # meta_graph_transform call below.
      input_names = [
          tensor.name
          for input_dict in input_alternatives.values()
          for tensor in input_dict.values()
      ]
      output_names = [
          tensor.name
          for output_alternative in output_alternatives.values()
          for tensor in output_alternative[1].values()
      ]
    # Write the additional MetaGraphDefs
    for graph_rewrite_spec in graph_rewrite_specs[1:]:
      # TODO(soergel) consider moving most of this to saved_model.builder_impl
      # as e.g. builder.add_rewritten_meta_graph(rewritten_graph_def, tags)
      transformed_meta_graph_def = meta_graph_transform.meta_graph_transform(
          base_meta_graph_def, input_names, output_names,
          graph_rewrite_spec.transforms, graph_rewrite_spec.tags)
      # pylint: disable=protected-access
      meta_graph_def = builder._saved_model.meta_graphs.add()
      # pylint: enable=protected-access
      meta_graph_def.CopyFrom(transformed_meta_graph_def)
    # Add the extra assets
    if assets_extra:
      assets_extra_path = os.path.join(
          compat.as_bytes(temp_export_dir), compat.as_bytes('assets.extra'))
      for dest_relative, source in assets_extra.items():
        dest_absolute = os.path.join(
            compat.as_bytes(assets_extra_path), compat.as_bytes(dest_relative))
        dest_path = os.path.dirname(dest_absolute)
        gfile.MakeDirs(dest_path)
        gfile.Copy(source, dest_absolute)
    builder.save(as_text)
    # Atomic publish: rename the fully-written temp dir to its final location.
    gfile.Rename(temp_export_dir, export_dir)
    return export_dir
# For time of deprecation x,y from Estimator allow direct access.
# pylint: disable=protected-access
class SKCompat(sklearn.BaseEstimator):
  """Scikit learn wrapper for TensorFlow Learn Estimator.

  THIS CLASS IS DEPRECATED. See
  [contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
  for general migration instructions.
  """

  @deprecated(None, 'Please switch to the Estimator interface.')
  def __init__(self, estimator):
    """Wraps `estimator`, exposing scikit-learn style fit/score/predict."""
    self._estimator = estimator

  def fit(self, x, y, batch_size=128, steps=None, max_steps=None,
          monitors=None):
    """Trains the wrapped estimator on in-memory data `x`/`y`.

    Returns:
      self, to allow scikit-learn style chaining.
    """
    input_fn, feed_fn = _get_input_fn(
        x,
        y,
        input_fn=None,
        feed_fn=None,
        batch_size=batch_size,
        shuffle=True,
        epochs=None)
    all_monitors = []
    if feed_fn:
      # The feed hook pushes numpy batches into the graph placeholders.
      all_monitors = [basic_session_run_hooks.FeedFnHook(feed_fn)]
    if monitors:
      all_monitors.extend(monitors)
    self._estimator.fit(
        input_fn=input_fn,
        steps=steps,
        max_steps=max_steps,
        monitors=all_monitors)
    return self

  def score(self, x, y, batch_size=128, steps=None, metrics=None, name=None):
    """Evaluates the estimator on `x`/`y`.

    Returns:
      Dict of evaluation results, augmented with 'global_step'.

    Raises:
      ValueError: if `metrics` is neither None nor a dict.
    """
    input_fn, feed_fn = _get_input_fn(
        x,
        y,
        input_fn=None,
        feed_fn=None,
        batch_size=batch_size,
        shuffle=False,
        epochs=1)
    if metrics is not None and not isinstance(metrics, dict):
      raise ValueError('Metrics argument should be None or dict. '
                       'Got %s.' % metrics)
    eval_results, global_step = self._estimator._evaluate_model(
        input_fn=input_fn,
        feed_fn=feed_fn,
        steps=steps,
        metrics=metrics,
        name=name)
    if eval_results is not None:
      eval_results.update({'global_step': global_step})
    return eval_results

  def predict(self, x, batch_size=128, outputs=None):
    """Runs inference over `x`, concatenating per-batch outputs.

    Returns:
      A single array when the model yields plain batches, or a dict of
      arrays keyed like the model's per-batch output dicts.
    """
    input_fn, feed_fn = _get_input_fn(
        x,
        None,
        input_fn=None,
        feed_fn=None,
        batch_size=batch_size,
        shuffle=False,
        epochs=1)
    results = list(
        self._estimator._infer_model(
            input_fn=input_fn,
            feed_fn=feed_fn,
            outputs=outputs,
            as_iterable=True,
            iterate_batches=True))
    if not isinstance(results[0], dict):
      # `results` is already a list of arrays; pass it directly instead of
      # copying it through a redundant comprehension.
      return np.concatenate(results, axis=0)
    return {
        key: np.concatenate([output[key] for output in results], axis=0)
        for key in results[0]
    }
| mit |
grammarly/browser-extensions | generate/lib/validictory/validator.py | 5 | 22200 | import re
import sys
import copy
from datetime import datetime
import warnings
from collections import Mapping, Container
import json
if sys.version_info[0] == 3:
_str_type = str
else:
_str_type = basestring
# if given in properties in a schema, will be used to match against any non-explicit properties found
FIELD_WILDCARD = "*"
class SchemaError(ValueError):
    """
    errors encountered in processing a schema (subclass of :class:`ValueError`)

    Raised when the schema itself is malformed, as opposed to the data under
    validation being invalid (see :class:`ValidationError`).
    """
class ValidationError(ValueError):
    """
    validation errors encountered during validation (subclass of
    :class:`ValueError`)

    Raised when the data does not conform to the schema.
    """
class UnexpectedPropertyError(ValidationError):
    """
    unexpected property encountered during validation (subclass of
    :class:`ValidationError`)

    Raised when ``additionalProperties`` is False and the data contains a
    property the schema does not declare.
    """
def _generate_datetime_validator(format_option, dateformat_string):
def validate_format_datetime(validator, fieldname, value, format_option):
try:
datetime.strptime(value, dateformat_string)
except ValueError:
raise ValidationError(
"Value %(value)r of field '%(fieldname)s' is not in '%(format_option)s' format" % locals())
return validate_format_datetime
# Prebuilt validators for the draft-03 date/time ``format`` options.
validate_format_date_time = _generate_datetime_validator('date-time', '%Y-%m-%dT%H:%M:%SZ')
validate_format_date = _generate_datetime_validator('date', '%Y-%m-%d')
validate_format_time = _generate_datetime_validator('time', '%H:%M:%S')
def validate_format_utc_millisec(validator, fieldname, value, format_option):
    """Validate the 'utc-millisec' format: value must be a positive number."""
    if not isinstance(value, (int, float)):
        raise ValidationError("Value %(value)r of field '%(fieldname)s' is not a number" % locals())
    # ``not value > 0`` (rather than ``value <= 0``) also rejects NaN.
    if not value > 0:
        raise ValidationError("Value %(value)r of field '%(fieldname)s' is not a positive number" % locals())
# Maps schema ``format`` option names to validator callables; used as the
# default set by SchemaValidator and extensible via register_format_validator.
DEFAULT_FORMAT_VALIDATORS = {
    'date-time' : validate_format_date_time,
    'date' : validate_format_date,
    'time' : validate_format_time,
    'utc-millisec' : validate_format_utc_millisec,
}
class SchemaValidator(object):
'''
Validator largely based upon the JSON Schema proposal but useful for
validating arbitrary python data structures.
:param format_validators: optional dictionary of custom format validators
:param required_by_default: defaults to True, set to False to make
``required`` schema attribute False by default.
:param blank_by_default: defaults to False, set to True to make ``blank``
schema attribute True by default.
'''
def __init__(self, format_validators=None, required_by_default=True, blank_by_default=False):
if format_validators is None:
format_validators = DEFAULT_FORMAT_VALIDATORS.copy()
self._format_validators = format_validators
self.required_by_default = required_by_default
self.blank_by_default = blank_by_default
def register_format_validator(self, format_name, format_validator_fun):
self._format_validators[format_name] = format_validator_fun
def validate_type_string(self, val):
return isinstance(val, _str_type)
def validate_type_integer(self, val):
return type(val) in (int, long)
def validate_type_number(self, val):
return type(val) in (int, long, float)
def validate_type_boolean(self, val):
return type(val) == bool
def validate_type_object(self, val):
return isinstance(val, Mapping)
def validate_type_array(self, val):
return isinstance(val, (list, tuple))
def validate_type_null(self, val):
return val is None
def validate_type_any(self, val):
return True
def _error(self, desc, value, fieldname, **params):
params['value'] = value
params['fieldname'] = fieldname
message = desc % params
raise ValidationError(message)
def validate_type(self, x, fieldname, schema, fieldtype=None):
'''
Validates that the fieldtype specified is correct for the given
data
'''
# We need to know if the field exists or if it's just Null
fieldexists = True
try:
value = x[fieldname]
except KeyError:
fieldexists = False
value = None
if fieldtype and fieldexists:
if isinstance(fieldtype, (list, tuple)):
# Match if type matches any one of the types in the list
datavalid = False
for eachtype in fieldtype:
try:
self.validate_type(x, fieldname, eachtype, eachtype)
datavalid = True
break
except ValidationError:
pass
if not datavalid:
self._error("Value %(value)r for field '%(fieldname)s' is not of type %(fieldtype)s",
value, fieldname, fieldtype=fieldtype)
elif isinstance(fieldtype, dict):
try:
self.__validate(fieldname, x, fieldtype)
except ValueError as e:
raise e
else:
try:
type_checker = getattr(self, 'validate_type_%s' % fieldtype)
except AttributeError:
raise SchemaError("Field type '%s' is not supported." %
fieldtype)
if not type_checker(value):
self._error("Value %(value)r for field '%(fieldname)s' is not of type %(fieldtype)s",
value, fieldname, fieldtype=fieldtype)
def validate_properties(self, x, fieldname, schema, properties=None, location="config"):
'''
Validates properties of a JSON object by processing the object's
schema recursively
'''
def validate_one_property(value, properties, location, fieldname):
if isinstance(value, dict):
if isinstance(properties, dict):
for eachProp in properties:
self.__validate(eachProp, value,
properties.get(eachProp), location + "." + fieldname)
else:
raise SchemaError("Properties definition of field '%s' is not an object" % fieldname)
if fieldname == FIELD_WILDCARD:
for actual_name, value in x.iteritems():
validate_one_property(value, properties, location, actual_name)
elif x.get(fieldname) is not None:
value = x.get(fieldname)
validate_one_property(value, properties, location, fieldname)
def validate_items(self, x, fieldname, schema, items=None):
'''
Validates that all items in the list for the given field match the
given schema
'''
if x.get(fieldname) is not None:
value = x.get(fieldname)
if isinstance(value, (list, tuple)):
if isinstance(items, (list, tuple)):
if not 'additionalItems' in schema and len(items) != len(value):
self._error("Length of list %(value)r for field '%(fieldname)s' is not equal to length of schema list",
value, fieldname)
else:
for itemIndex in range(len(items)):
try:
self.validate(value[itemIndex], items[itemIndex])
except ValueError as e:
raise type(e)("Failed to validate field '%s' list schema: %s" % (fieldname, e))
elif isinstance(items, dict):
for eachItem in value:
try:
self._validate(eachItem, items)
except ValueError as e:
# a bit of a hack: replace reference to config
# with 'list item' so error messages make sense
old_error = str(e).replace("field 'config'",
'list item')
raise type(e)("Failed to validate field '%s' list schema: %s" % (fieldname, old_error))
else:
raise SchemaError("Properties definition of field '%s' is not a list or an object" % fieldname)
def validate_required(self, x, fieldname, schema, required, location):
'''
Validates that the given field is present if required is True
'''
# Make sure the field is present
if location.startswith("."):
location = location[1:]
if fieldname != FIELD_WILDCARD and fieldname not in x and required:
self._error("Required field '%(fieldname)s' is missing from %(location)s",
None, fieldname, location=location)
def validate_blank(self, x, fieldname, schema, blank=False):
'''
Validates that the given field is not blank if blank=False
'''
value = x.get(fieldname)
if isinstance(value, _str_type) and not blank and not value:
self._error("Value %(value)r for field '%(fieldname)s' cannot be blank'",
value, fieldname)
def validate_patternProperties(self, x, fieldname, schema, patternproperties=None):
if patternproperties == None:
patternproperties = {}
value_obj = x.get(fieldname)
for pattern, schema in patternproperties.items():
for key, value in value_obj.items():
if re.match(pattern, key):
self.validate(value, schema)
def validate_additionalItems(self, x, fieldname, schema, additionalItems=False):
value = x.get(fieldname)
if not isinstance(value, (list, tuple)):
return
if isinstance(additionalItems, bool):
if additionalItems or 'items' not in schema:
return
elif len(value) != len(schema['items']):
#print locals(), value, len(value), len(schema['items'])
self._error("Length of list %(value)r for field '%(fieldname)s' is not equal to length of schema list",
value, fieldname)
remaining = value[len(schema['items']):]
if len(remaining) > 0:
self._validate(remaining, {'items': additionalItems})
def validate_additionalProperties(self, x, fieldname, schema,
additionalProperties=None):
'''
Validates additional properties of a JSON object that were not
specifically defined by the properties property
'''
# If additionalProperties is the boolean value True then we accept
# any additional properties.
if isinstance(additionalProperties, bool) and additionalProperties:
return
value = x.get(fieldname)
if isinstance(additionalProperties, (dict, bool)):
properties = schema.get("properties")
if properties is None:
properties = {}
if value is None:
value = {}
for eachProperty in value:
if eachProperty not in properties:
# If additionalProperties is the boolean value False
# then we don't accept any additional properties.
if (isinstance(additionalProperties, bool) and
not additionalProperties):
raise UnexpectedPropertyError(eachProperty)
self.__validate(eachProperty, value,
additionalProperties)
else:
raise SchemaError("additionalProperties schema definition for field '%s' is not an object" % fieldname)
def validate_requires(self, x, fieldname, schema, requires=None):
warnings.warn('The "requires" attribute has been replaced by "dependencies"', DeprecationWarning)
if x.get(fieldname) is not None:
if x.get(requires) is None:
self._error("Field '%(requires)s' is required by field '%(fieldname)s'",
None, fieldname, requires=requires)
def validate_dependencies(self, x, fieldname, schema, dependencies=None):
if x.get(fieldname) is not None:
# handle cases where dependencies is a string or list of strings
if isinstance(dependencies, _str_type):
dependencies = [dependencies]
if isinstance(dependencies, (list, tuple)):
for dependency in dependencies:
if dependency not in x:
self._error("Field '%(dependency)s' is required by field '%(fieldname)s'",
None, fieldname, dependency=dependency)
elif isinstance(dependencies, dict):
# NOTE: the version 3 spec is really unclear on what this means
# based on the meta-schema I'm assuming that it should check
# that if a key exists, the appropriate value exists
for k, v in dependencies.items():
if k in x and v not in x:
self._error("Field '%(v)s' is required by field '%(k)s'",
None, fieldname, k=k, v=v)
else:
raise SchemaError("'dependencies' must be a string, "
"list of strings, or dict")
def validate_minimum(self, x, fieldname, schema, minimum=None):
'''
Validates that the field is longer than or equal to the minimum
length if specified
'''
exclusive = schema.get('exclusiveMinimum', False)
if x.get(fieldname) is not None:
value = x.get(fieldname)
if value is not None:
if type(value) in (int, float) and (not exclusive and value < minimum) or (exclusive and value <= minimum):
self._error("Value %(value)r for field '%(fieldname)s' is less than minimum value: %(minimum)f",
value, fieldname, minimum=minimum)
def validate_maximum(self, x, fieldname, schema, maximum=None):
'''
Validates that the field is shorter than or equal to the maximum
length if specified.
'''
exclusive = schema.get('exclusiveMaximum', False)
if x.get(fieldname) is not None:
value = x.get(fieldname)
if value is not None:
if type(value) in (int, float) and (not exclusive and value > maximum) or (exclusive and value >= maximum):
self._error("Value %(value)r for field '%(fieldname)s' is greater than maximum value: %(maximum)f",
value, fieldname, maximum=maximum)
def validate_maxLength(self, x, fieldname, schema, length=None):
'''
Validates that the value of the given field is shorter than or equal
to the specified length
'''
value = x.get(fieldname)
if isinstance(value, (_str_type, list, tuple)) and len(value) > length:
self._error("Length of value %(value)r for field '%(fieldname)s' must be less than or equal to %(length)d",
value, fieldname, length=length)
def validate_minLength(self, x, fieldname, schema, length=None):
'''
Validates that the value of the given field is longer than or equal
to the specified length
'''
value = x.get(fieldname)
if isinstance(value, (_str_type, list, tuple)) and len(value) < length:
self._error("Length of value %(value)r for field '%(fieldname)s' must be greater than or equal to %(length)d",
value, fieldname, length=length)
validate_minItems = validate_minLength
validate_maxItems = validate_maxLength
def validate_format(self, x, fieldname, schema, format_option=None):
'''
Validates the format of primitive data types
'''
value = x.get(fieldname)
format_validator = self._format_validators.get(format_option, None)
if format_validator and value:
format_validator(self, fieldname, value, format_option)
# TODO: warn about unsupported format ?
def validate_pattern(self, x, fieldname, schema, pattern=None):
'''
Validates that the given field, if a string, matches the given
regular expression.
'''
value = x.get(fieldname)
if isinstance(value, _str_type):
if not re.match(pattern, value):
self._error("Value %(value)r for field '%(fieldname)s' does not match regular expression '%(pattern)s'",
value, fieldname, pattern=pattern)
def validate_uniqueItems(self, x, fieldname, schema, uniqueItems=False):
'''
Validates that all items in an array instance MUST be unique
(contains no two identical values).
'''
# If additionalProperties is the boolean value True then we accept
# any additional properties.
if isinstance(uniqueItems, bool) and not uniqueItems:
return
values = x.get(fieldname)
if not isinstance(values, (list, tuple)):
return
hashables = set()
unhashables = []
for value in values:
if isinstance(value, (list, dict)):
container, add = unhashables, unhashables.append
else:
container, add = hashables, hashables.add
if value in container:
self._error(
"Value %(value)r for field '%(fieldname)s' is not unique",
value, fieldname)
else:
add(value)
def validate_enum(self, x, fieldname, schema, options=None):
'''
Validates that the value of the field is equal to one of the
specified option values
'''
value = x.get(fieldname)
if value is not None:
if not isinstance(options, Container):
raise SchemaError("Enumeration %r for field '%s' must be a container", (options, fieldname))
if value not in options:
self._error("Value %(value)r for field '%(fieldname)s' is not in the enumeration: %(options)r",
value, fieldname, options=options)
def validate_title(self, x, fieldname, schema, title=None):
if not isinstance(title, (_str_type, type(None))):
raise SchemaError("The title for field '%s' must be a string" %
fieldname)
def validate_description(self, x, fieldname, schema, description=None):
if not isinstance(description, (_str_type, type(None))):
raise SchemaError("The description for field '%s' must be a string."
% fieldname)
def validate_divisibleBy(self, x, fieldname, schema, divisibleBy=None):
value = x.get(fieldname)
if not self.validate_type_number(value):
return
if divisibleBy == 0:
raise SchemaError("'%r' <- divisibleBy can not be 0" % schema)
if value % divisibleBy != 0:
self._error("Value %(value)r field '%(fieldname)s' is not divisible by '%(divisibleBy)s'.",
x.get(fieldname), fieldname, divisibleBy=divisibleBy)
def validate_disallow(self, x, fieldname, schema, disallow=None):
'''
Validates that the value of the given field does not match the
disallowed type.
'''
try:
self.validate_type(x, fieldname, schema, disallow)
except ValidationError:
return
self._error("Value %(value)r of type %(disallow)s is disallowed for field '%(fieldname)s'",
x.get(fieldname), fieldname, disallow=disallow)
def validate(self, data, schema, location="_data"):
'''
Validates a piece of json data against the provided json-schema.
'''
self._validate(data, schema, location="")
def _validate(self, data, schema, location="config"):
self.__validate("config", {"config": data}, schema, location)
def __validate(self, fieldname, data, schema, location):
if schema is not None:
if not isinstance(schema, dict):
raise SchemaError("Schema structure is invalid.")
newschema = copy.copy(schema)
# handle 'optional', replace it with 'required'
if 'required' in schema and 'optional' in schema:
raise SchemaError('cannot specify optional and required')
elif 'optional' in schema:
warnings.warn('The "optional" attribute has been replaced by "required"', DeprecationWarning)
newschema['required'] = not schema['optional']
elif 'required' not in schema:
newschema['required'] = self.required_by_default
if 'blank' not in schema:
newschema['blank'] = self.blank_by_default
def validate_one_field(fieldname, data, schema, location):
for schemaprop in newschema:
validatorname = "validate_" + schemaprop
validator = getattr(self, validatorname, None)
if (schemaprop == "properties") or (schemaprop == "required"):
validator(data, fieldname, schema,
newschema.get(schemaprop), location)
elif validator:
validator(data, fieldname, schema,
newschema.get(schemaprop))
if fieldname == FIELD_WILDCARD:
for fieldname in data:
validate_one_field(fieldname, data, schema, location)
else:
validate_one_field(fieldname, data, schema, location)
return data
__all__ = ['SchemaValidator']
| bsd-3-clause |
jonathonwalz/ansible | lib/ansible/plugins/action/wait_for_connection.py | 91 | 4468 | # (c) 2017, Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# CI-required python3 boilerplate
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import time
from datetime import datetime, timedelta
from ansible.module_utils.pycompat24 import get_exception
from ansible.plugins.action import ActionBase
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class TimedOutException(Exception):
    """Raised when connectivity is not established before the timeout."""
    pass
class ActionModule(ActionBase):
    """Action plugin that waits until the managed host accepts connections.

    Probes the transport layer first (when the connection plugin exposes a
    `transport_test`), then performs an end-to-end ping/win_ping module
    round-trip, retrying until the configured timeout expires.
    """
    TRANSFERS_FILES = False

    DEFAULT_CONNECT_TIMEOUT = 5
    DEFAULT_DELAY = 0
    DEFAULT_SLEEP = 1
    DEFAULT_TIMEOUT = 600

    def do_until_success_or_timeout(self, what, timeout, connect_timeout, what_desc, sleep=1):
        """Call `what(connect_timeout)` repeatedly until success or `timeout`.

        Raises:
            TimedOutException: if no attempt succeeded before the deadline.
        """
        # BUG FIX: track the last failure explicitly. The previous code
        # referenced the loop-local exception after the loop, which raised
        # NameError when the loop body never ran (e.g. timeout <= 0).
        last_error = None
        max_end_time = datetime.utcnow() + timedelta(seconds=timeout)

        while datetime.utcnow() < max_end_time:
            try:
                what(connect_timeout)
                if what_desc:
                    display.debug("wait_for_connection: %s success" % what_desc)
                return
            except Exception:
                last_error = get_exception()
                if what_desc:
                    display.debug("wait_for_connection: %s fail (expected), retrying in %d seconds..." % (what_desc, sleep))
                time.sleep(sleep)

        raise TimedOutException("timed out waiting for %s: %s" % (what_desc, str(last_error)))

    def run(self, tmp=None, task_vars=None):
        """Entry point: poll the connection per the task's timing arguments."""
        if task_vars is None:
            task_vars = dict()

        connect_timeout = int(self._task.args.get('connect_timeout', self.DEFAULT_CONNECT_TIMEOUT))
        delay = int(self._task.args.get('delay', self.DEFAULT_DELAY))
        sleep = int(self._task.args.get('sleep', self.DEFAULT_SLEEP))
        timeout = int(self._task.args.get('timeout', self.DEFAULT_TIMEOUT))

        if self._play_context.check_mode:
            display.vvv("wait_for_connection: skipping for check_mode")
            return dict(skipped=True)

        result = super(ActionModule, self).run(tmp, task_vars)

        def ping_module_test(connect_timeout):
            ''' Test ping module, if available '''
            display.vvv("wait_for_connection: attempting ping module test")
            # call connection reset between runs if it's there
            try:
                self._connection._reset()
            except AttributeError:
                pass

            # Use win_ping on winrm/powershell, else use ping
            if hasattr(self._connection, '_shell_type') and self._connection._shell_type == 'powershell':
                ping_result = self._execute_module(module_name='win_ping', module_args=dict(), tmp=tmp, task_vars=task_vars)
            else:
                ping_result = self._execute_module(module_name='ping', module_args=dict(), tmp=tmp, task_vars=task_vars)

            # Test module output
            if ping_result['ping'] != 'pong':
                raise Exception('ping test failed')

        start = datetime.now()

        if delay:
            time.sleep(delay)

        try:
            # If the connection has a transport_test method, use it first
            if hasattr(self._connection, 'transport_test'):
                self.do_until_success_or_timeout(self._connection.transport_test, timeout, connect_timeout, what_desc="connection port up", sleep=sleep)

            # Use the ping module test to determine end-to-end connectivity
            self.do_until_success_or_timeout(ping_module_test, timeout, connect_timeout, what_desc="ping module test success", sleep=sleep)

        except TimedOutException:
            e = get_exception()
            result['failed'] = True
            result['msg'] = str(e)

        elapsed = datetime.now() - start
        result['elapsed'] = elapsed.seconds

        return result
| gpl-3.0 |
sourabhv/python-koans-solutions | python3/koans/about_exceptions.py | 1 | 1883 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutExceptions(Koan):
    """Koan exercises demonstrating Python exception behavior."""
    class MySpecialError(RuntimeError):
        # Custom exception used below; inherits RuntimeError -> Exception.
        pass
    def test_exceptions_inherit_from_exception(self):
        # mro() lists the method resolution order from the class up to object.
        mro = self.MySpecialError.mro()
        self.assertEqual('RuntimeError', mro[1].__name__)
        self.assertEqual('Exception', mro[2].__name__)
        self.assertEqual('BaseException', mro[3].__name__)
        self.assertEqual('object', mro[4].__name__)
    def test_try_clause(self):
        result = None
        try:
            self.fail("Oops")
        except Exception as ex:
            result = 'exception handled'
            # Bind to another name: Python 3 deletes ``ex`` once the except
            # block ends.
            ex2 = ex
        self.assertEqual('exception handled', result)
        self.assertEqual(True, isinstance(ex2, Exception))
        self.assertEqual(False, isinstance(ex2, RuntimeError))
        self.assertTrue(issubclass(RuntimeError, Exception), \
            "RuntimeError is a subclass of Exception")
        self.assertEqual("Oops", ex2.args[0])
    def test_raising_a_specific_error(self):
        result = None
        try:
            raise self.MySpecialError("My Message")
        except self.MySpecialError as ex:
            result = 'exception handled'
            # args[0] holds the first positional argument of the exception.
            msg = ex.args[0]
        self.assertEqual('exception handled', result)
        self.assertEqual('My Message', msg)
    def test_else_clause(self):
        result = None
        try:
            pass
        except RuntimeError:
            result = 'it broke'
            pass
        else:
            # ``else`` runs only when the try block raised nothing.
            result = 'no damage done'
        self.assertEqual('no damage done', result)
    def test_finally_clause(self):
        result = None
        try:
            self.fail("Oops")
        except:
            # no code here
            pass
        finally:
            # ``finally`` runs whether or not an exception occurred.
            result = 'always run'
        self.assertEqual('always run', result)
Lessig2016/pledgeservice | lib/validictory/validator.py | 11 | 26100 | import re
import sys
import copy
import socket
from datetime import datetime
from decimal import Decimal
from collections import Mapping, Container
if sys.version_info[0] == 3:
_str_type = str
_int_types = (int,)
else:
_str_type = basestring
_int_types = (int, long)
class SchemaError(ValueError):
    """
    errors encountered in processing a schema (subclass of :class:`ValueError`)
    """
class ValidationError(ValueError):
    """
    validation errors encountered during validation (subclass of
    :class:`ValueError`)
    """
class FieldValidationError(ValidationError):
    """
    validation error that refers to a specific field
    Includes `fieldname` and `value` attributes.
    """
    def __init__(self, message, fieldname, value):
        super(FieldValidationError, self).__init__(message)
        # Remember which field failed and the offending value so callers can
        # inspect the failure programmatically instead of parsing the message.
        self.fieldname = fieldname
        self.value = value
def _generate_datetime_validator(format_option, dateformat_string):
def validate_format_datetime(validator, fieldname, value, format_option):
try:
datetime.strptime(value, dateformat_string)
except ValueError:
raise FieldValidationError(
"Value %(value)r of field '%(fieldname)s' is not in "
"'%(format_option)s' format" % locals(), fieldname, value)
return validate_format_datetime
validate_format_date_time = _generate_datetime_validator('date-time',
'%Y-%m-%dT%H:%M:%SZ')
validate_format_date = _generate_datetime_validator('date', '%Y-%m-%d')
validate_format_time = _generate_datetime_validator('time', '%H:%M:%S')
def validate_format_utc_millisec(validator, fieldname, value, format_option):
    """Check that *value* is a positive number (int/float/Decimal).
    NOTE(review): zero is rejected here even though 0 is a valid epoch
    timestamp, and the second message contains a doubled space -- both are
    long-standing upstream behavior, preserved as-is.
    """
    if not isinstance(value, _int_types + (float, Decimal)):
        raise FieldValidationError("Value %(value)r of field '%(fieldname)s' "
                                   "is not a number" % locals(), fieldname,
                                   value)
    if not value > 0:
        raise FieldValidationError("Value %(value)r of field '%(fieldname)s' "
                                   " is not a positive number" % locals(),
                                   fieldname, value)
def validate_format_ip_address(validator, fieldname, value, format_option):
    """Check that *value* is a dotted-quad IPv4 address ("X.X.X.X")."""
    try:
        socket.inet_aton(value)
        # Make sure we expect "X.X.X.X" as socket.inet_aton() converts "1"
        # to "0.0.0.1"
        ip = len(value.split('.')) == 4
    except Exception:
        # Narrowed from a bare "except:": inet_aton raises OSError/socket.error
        # for malformed addresses, and TypeError/AttributeError for
        # non-string values -- all of which simply mean "not an IP".
        ip = False
    if not ip:
        # Fixed: the two string literals previously concatenated without a
        # space, producing "...'%(fieldname)s'is not a ip-address".
        raise FieldValidationError("Value %(value)r of field '%(fieldname)s' "
                                   "is not a ip-address" % locals(), fieldname,
                                   value)
DEFAULT_FORMAT_VALIDATORS = {
'date-time': validate_format_date_time,
'date': validate_format_date,
'time': validate_format_time,
'utc-millisec': validate_format_utc_millisec,
'ip-address': validate_format_ip_address,
}
class SchemaValidator(object):
'''
Validator largely based upon the JSON Schema proposal but useful for
validating arbitrary python data structures.
:param format_validators: optional dictionary of custom format validators
:param required_by_default: defaults to True, set to False to make
``required`` schema attribute False by default.
:param blank_by_default: defaults to False, set to True to make ``blank``
schema attribute True by default.
:param disallow_unknown_properties: defaults to False, set to True to
disallow properties not listed in the schema definition
:param apply_default_to_data: defaults to False, set to True to modify the
data in case the schema definition includes a "default" property
'''
    def __init__(self, format_validators=None, required_by_default=True,
                 blank_by_default=False, disallow_unknown_properties=False,
                 apply_default_to_data=False):
        # Maps format-option name -> checker callable; see
        # register_format_validator().
        self._format_validators = {}
        # add the default format validators
        for key, value in DEFAULT_FORMAT_VALIDATORS.items():
            self.register_format_validator(key, value)
        # register any custom format validators if they were provided
        # (custom entries override the defaults for the same name)
        if format_validators:
            for key, value in format_validators.items():
                self.register_format_validator(key, value)
        # Policy knobs consulted later by __validate()/validate_*().
        self.required_by_default = required_by_default
        self.blank_by_default = blank_by_default
        self.disallow_unknown_properties = disallow_unknown_properties
        self.apply_default_to_data = apply_default_to_data
def register_format_validator(self, format_name, format_validator_fun):
self._format_validators[format_name] = format_validator_fun
    def validate_type_string(self, val):
        # True for any str (and, on Python 2, unicode/basestring).
        return isinstance(val, _str_type)
    def validate_type_integer(self, val):
        # Exact type check: deliberately excludes bool, which is an int
        # subclass but not a JSON integer.
        return type(val) in _int_types
    def validate_type_number(self, val):
        # ints, floats and Decimals count as numbers; bool again excluded
        # by the exact type() test.
        return type(val) in _int_types + (float, Decimal,)
    def validate_type_boolean(self, val):
        return type(val) == bool
    def validate_type_object(self, val):
        # Accept real Mappings plus anything duck-typed like one.
        # NOTE(review): Mapping is imported from `collections`, which stopped
        # re-exporting ABCs in Python 3.10 -- verify module imports.
        return isinstance(val, Mapping) or (hasattr(val, 'keys')
                                            and hasattr(val, 'items'))
    def validate_type_array(self, val):
        return isinstance(val, (list, tuple))
    def validate_type_null(self, val):
        return val is None
    def validate_type_any(self, val):
        # "any" matches everything, including None.
        return True
    def _error(self, desc, value, fieldname, **params):
        # Interpolate the %-style message template and raise. 'value' and
        # 'fieldname' are always available to the template; extra template
        # arguments arrive via **params.
        params['value'] = value
        params['fieldname'] = fieldname
        message = desc % params
        raise FieldValidationError(message, fieldname, value)
def _validate_unknown_properties(self, schema, data, fieldname):
schema_properties = set(schema)
data_properties = set(data)
delta = data_properties - schema_properties
if delta:
unknowns = ''
for x in delta:
unknowns += '"%s", ' % x
unknowns = unknowns.rstrip(", ")
raise SchemaError('Unknown properties for field '
'"%(fieldname)s": %(unknowns)s' %
locals())
    def validate_type(self, x, fieldname, schema, fieldtype=None):
        '''
        Validates that the fieldtype specified is correct for the given
        data.  *fieldtype* may be a simple type name string, a list of
        alternatives (union), or a nested schema dict.
        '''
        # We need to know if the field exists or if it's just Null
        fieldexists = True
        try:
            value = x[fieldname]
        except KeyError:
            fieldexists = False
            value = None
        # Missing fields are handled by validate_required, not here.
        if fieldtype and fieldexists:
            if isinstance(fieldtype, (list, tuple)):
                # Match if type matches any one of the types in the list
                datavalid = False
                errorlist = []
                for eachtype in fieldtype:
                    try:
                        # Recursive call handles nested unions/schemas.
                        self.validate_type(x, fieldname, eachtype, eachtype)
                        datavalid = True
                        break
                    except ValidationError as err:
                        errorlist.append(err)
                if not datavalid:
                    self._error("Value %(value)r for field '%(fieldname)s' "
                                "doesn't match any of %(numsubtypes)d "
                                "subtypes in %(fieldtype)s; "
                                "errorlist = %(errorlist)r",
                                value, fieldname, numsubtypes=len(fieldtype),
                                fieldtype=fieldtype, errorlist=errorlist)
            elif isinstance(fieldtype, dict):
                # A dict fieldtype is a full nested schema.
                try:
                    self.__validate(fieldname, x, fieldtype)
                except ValueError as e:
                    raise e
            else:
                # Simple type name: dispatch to validate_type_<name>.
                try:
                    type_checker = getattr(self, 'validate_type_%s' %
                                           fieldtype)
                except AttributeError:
                    raise SchemaError("Field type '%s' is not supported." %
                                      fieldtype)
                if not type_checker(value):
                    self._error("Value %(value)r for field '%(fieldname)s' "
                                "is not of type %(fieldtype)s",
                                value, fieldname, fieldtype=fieldtype)
    def validate_properties(self, x, fieldname, schema, properties=None):
        '''
        Validates properties of a JSON object by processing the object's
        schema recursively.  Non-dict values are ignored here (their type is
        checked by validate_type).
        '''
        if x.get(fieldname) is not None:
            value = x.get(fieldname)
            if isinstance(value, dict):
                if isinstance(properties, dict):
                    if self.disallow_unknown_properties:
                        self._validate_unknown_properties(properties, value,
                                                          fieldname)
                    # Validate each declared property against its sub-schema.
                    for eachProp in properties:
                        self.__validate(eachProp, value,
                                        properties.get(eachProp))
                else:
                    raise SchemaError("Properties definition of field '%s' is "
                                      "not an object" % fieldname)
    def validate_items(self, x, fieldname, schema, items=None):
        '''
        Validates that all items in the list for the given field match the
        given schema.  *items* may be a single schema dict (applied to every
        element) or a list of schemas (positional / tuple validation).
        '''
        if x.get(fieldname) is not None:
            value = x.get(fieldname)
            if isinstance(value, (list, tuple)):
                if isinstance(items, (list, tuple)):
                    # Tuple validation: lengths must agree unless
                    # additionalItems is declared.
                    if (not 'additionalItems' in schema and
                            len(items) != len(value)):
                        self._error("Length of list %(value)r for field "
                                    "'%(fieldname)s' is not equal to length "
                                    "of schema list", value, fieldname)
                    else:
                        for itemIndex in range(len(items)):
                            try:
                                self.validate(value[itemIndex],
                                              items[itemIndex])
                            except FieldValidationError as e:
                                # Re-raise with context about which field's
                                # item schema failed.
                                raise type(e)("Failed to validate field '%s' "
                                              "list schema: %s" %
                                              (fieldname, e), fieldname,
                                              e.value)
                elif isinstance(items, dict):
                    # Uniform validation: every element uses the same schema.
                    for eachItem in value:
                        if (self.disallow_unknown_properties and
                                'properties' in items):
                            self._validate_unknown_properties(
                                items['properties'], eachItem, fieldname)
                        try:
                            self._validate(eachItem, items)
                        except FieldValidationError as e:
                            # a bit of a hack: replace reference to _data
                            # with 'list item' so error messages make sense
                            old_error = str(e).replace("field '_data'",
                                                       'list item')
                            raise type(e)("Failed to validate field '%s' list "
                                          "schema: %s" %
                                          (fieldname, old_error), fieldname,
                                          e.value)
                else:
                    raise SchemaError("Properties definition of field '%s' is "
                                      "not a list or an object" % fieldname)
def validate_required(self, x, fieldname, schema, required):
'''
Validates that the given field is present if required is True
'''
# Make sure the field is present
if fieldname not in x and required:
self._error("Required field '%(fieldname)s' is missing",
None, fieldname)
def validate_blank(self, x, fieldname, schema, blank=False):
'''
Validates that the given field is not blank if blank=False
'''
value = x.get(fieldname)
if isinstance(value, _str_type) and not blank and not value:
self._error("Value %(value)r for field '%(fieldname)s' cannot be "
"blank'", value, fieldname)
def validate_patternProperties(self, x, fieldname, schema,
patternproperties=None):
if patternproperties is None:
patternproperties = {}
value_obj = x.get(fieldname, {})
for pattern, schema in patternproperties.items():
for key, value in value_obj.items():
if re.match(pattern, key):
self.validate(value, schema)
    def validate_additionalItems(self, x, fieldname, schema,
                                 additionalItems=False):
        '''
        Validates items beyond those covered by a positional 'items' schema
        list.  additionalItems may be True (anything allowed), False
        (no extras allowed), or a schema applied to the extra items.
        '''
        value = x.get(fieldname)
        if not isinstance(value, (list, tuple)):
            return
        if isinstance(additionalItems, bool):
            if additionalItems or 'items' not in schema:
                # Extras explicitly allowed, or nothing to compare against.
                return
            elif len(value) != len(schema['items']):
                self._error("Length of list %(value)r for field "
                            "'%(fieldname)s' is not equal to length of schema "
                            "list", value, fieldname)
        # NOTE(review): if additionalItems is a schema and 'items' is absent,
        # schema['items'] below raises KeyError -- confirm callers always
        # pair additionalItems with items.
        remaining = value[len(schema['items']):]
        if len(remaining) > 0:
            # Validate the overflow items against the additionalItems schema.
            self._validate(remaining, {'items': additionalItems})
    def validate_additionalProperties(self, x, fieldname, schema,
                                      additionalProperties=None):
        '''
        Validates additional properties of a JSON object that were not
        specifically defined by the properties property.  additionalProperties
        may be True (anything allowed), False (no extras allowed), or a schema
        applied to each extra property.
        '''
        # Shouldn't be validating additionalProperties on non-dicts
        value = x.get(fieldname)
        if not isinstance(value, dict):
            return
        # If additionalProperties is the boolean value True then we accept
        # any additional properties.
        if isinstance(additionalProperties, bool) and additionalProperties:
            return
        value = x.get(fieldname)
        if isinstance(additionalProperties, (dict, bool)):
            properties = schema.get("properties")
            # Keys matching a patternProperties regex are not "additional".
            patterns = schema["patternProperties"].keys() if 'patternProperties' in schema else []
            if properties is None:
                properties = {}
            if value is None:
                value = {}
            for eachProperty in value:
                if eachProperty not in properties and not \
                        any(re.match(p, eachProperty) for p in patterns):
                    # If additionalProperties is the boolean value False
                    # then we don't accept any additional properties.
                    if (isinstance(additionalProperties, bool) and not
                            additionalProperties):
                        self._error("additional property '%(prop)s' "
                                    "not defined by 'properties' or "
                                    "'patternProperties' are not "
                                    "allowed in field '%(fieldname)s'",
                                    None, fieldname, prop=eachProperty)
                    # Otherwise validate the extra property against the
                    # additionalProperties schema.
                    self.__validate(eachProperty, value,
                                    additionalProperties)
        else:
            raise SchemaError("additionalProperties schema definition for "
                              "field '%s' is not an object" % fieldname)
    def validate_dependencies(self, x, fieldname, schema, dependencies=None):
        '''
        Validates that fields named by *dependencies* are present whenever
        *fieldname* itself is present (and not None).
        '''
        if x.get(fieldname) is not None:
            # handle cases where dependencies is a string or list of strings
            if isinstance(dependencies, _str_type):
                dependencies = [dependencies]
            if isinstance(dependencies, (list, tuple)):
                for dependency in dependencies:
                    if dependency not in x:
                        self._error("Field '%(dependency)s' is required by "
                                    "field '%(fieldname)s'",
                                    None, fieldname, dependency=dependency)
            elif isinstance(dependencies, dict):
                # NOTE: the version 3 spec is really unclear on what this means
                # based on the meta-schema I'm assuming that it should check
                # that if a key exists, the appropriate value exists
                for k, v in dependencies.items():
                    if k in x and v not in x:
                        self._error("Field '%(v)s' is required by field "
                                    "'%(k)s'", None, fieldname, k=k, v=v)
            else:
                raise SchemaError("'dependencies' must be a string, "
                                  "list of strings, or dict")
def validate_minimum(self, x, fieldname, schema, minimum=None):
'''
Validates that the field is longer than or equal to the minimum
length if specified
'''
exclusive = schema.get('exclusiveMinimum', False)
if x.get(fieldname) is not None:
value = x.get(fieldname)
if value is not None:
if (type(value) in (int, float) and
(not exclusive and value < minimum) or
(exclusive and value <= minimum)):
self._error("Value %(value)r for field '%(fieldname)s' is "
"less than minimum value: %(minimum)f",
value, fieldname, minimum=minimum)
def validate_maximum(self, x, fieldname, schema, maximum=None):
'''
Validates that the field is shorter than or equal to the maximum
length if specified.
'''
exclusive = schema.get('exclusiveMaximum', False)
if x.get(fieldname) is not None:
value = x.get(fieldname)
if value is not None:
if (type(value) in (int, float) and
(not exclusive and value > maximum) or
(exclusive and value >= maximum)):
self._error("Value %(value)r for field '%(fieldname)s' is "
"greater than maximum value: %(maximum)f",
value, fieldname, maximum=maximum)
def validate_maxLength(self, x, fieldname, schema, length=None):
'''
Validates that the value of the given field is shorter than or equal
to the specified length
'''
value = x.get(fieldname)
if isinstance(value, (_str_type, list, tuple)) and len(value) > length:
self._error("Length of value %(value)r for field '%(fieldname)s' "
"must be less than or equal to %(length)d",
value, fieldname, length=length)
    def validate_minLength(self, x, fieldname, schema, length=None):
        '''
        Validates that the value of the given field is longer than or equal
        to the specified length.  Non-sized types are ignored.
        '''
        value = x.get(fieldname)
        if isinstance(value, (_str_type, list, tuple)) and len(value) < length:
            self._error("Length of value %(value)r for field '%(fieldname)s' "
                        "must be greater than or equal to %(length)d",
                        value, fieldname, length=length)
    # minItems/maxItems share the same length semantics as
    # minLength/maxLength, so reuse the implementations.
    validate_minItems = validate_minLength
    validate_maxItems = validate_maxLength
def validate_format(self, x, fieldname, schema, format_option=None):
'''
Validates the format of primitive data types
'''
value = x.get(fieldname)
format_validator = self._format_validators.get(format_option, None)
if format_validator and value:
format_validator(self, fieldname, value, format_option)
# TODO: warn about unsupported format ?
def validate_pattern(self, x, fieldname, schema, pattern=None):
'''
Validates that the given field, if a string, matches the given
regular expression.
'''
value = x.get(fieldname)
if isinstance(value, _str_type):
if not re.match(pattern, value):
self._error("Value %(value)r for field '%(fieldname)s' does "
"not match regular expression '%(pattern)s'",
value, fieldname, pattern=pattern)
def validate_uniqueItems(self, x, fieldname, schema, uniqueItems=False):
'''
Validates that all items in an array instance MUST be unique
(contains no two identical values).
'''
# If additionalProperties is the boolean value True then we accept
# any additional properties.
if isinstance(uniqueItems, bool) and not uniqueItems:
return
values = x.get(fieldname)
if not isinstance(values, (list, tuple)):
return
hashables = set()
unhashables = []
for value in values:
if isinstance(value, (list, dict)):
container, add = unhashables, unhashables.append
else:
container, add = hashables, hashables.add
if value in container:
self._error(
"Value %(value)r for field '%(fieldname)s' is not unique",
value, fieldname)
else:
add(value)
def validate_enum(self, x, fieldname, schema, options=None):
'''
Validates that the value of the field is equal to one of the
specified option values
'''
value = x.get(fieldname)
if value is not None:
if not isinstance(options, Container):
raise SchemaError("Enumeration %r for field '%s' must be a "
"container", (options, fieldname))
if value not in options:
self._error("Value %(value)r for field '%(fieldname)s' is not "
"in the enumeration: %(options)r",
value, fieldname, options=options)
    def validate_title(self, x, fieldname, schema, title=None):
        # 'title' is schema metadata: only its type is checked, never the data.
        if not isinstance(title, (_str_type, type(None))):
            raise SchemaError("The title for field '%s' must be a string" %
                              fieldname)
    def validate_description(self, x, fieldname, schema, description=None):
        # 'description' is schema metadata: only its type is checked.
        if not isinstance(description, (_str_type, type(None))):
            raise SchemaError("The description for field '%s' must be a string"
                              % fieldname)
    def validate_divisibleBy(self, x, fieldname, schema, divisibleBy=None):
        # Non-numeric values are ignored (their type is checked elsewhere).
        value = x.get(fieldname)
        if not self.validate_type_number(value):
            return
        # Guard against a ZeroDivisionError from the modulo below.
        if divisibleBy == 0:
            raise SchemaError("'%r' <- divisibleBy can not be 0" % schema)
        if value % divisibleBy != 0:
            self._error("Value %(value)r field '%(fieldname)s' is not "
                        "divisible by '%(divisibleBy)s'.",
                        x.get(fieldname), fieldname, divisibleBy=divisibleBy)
    def validate_disallow(self, x, fieldname, schema, disallow=None):
        '''
        Validates that the value of the given field does not match the
        disallowed type.
        '''
        try:
            # Inverted check: if the type validation *succeeds*, the value is
            # of a disallowed type and we must report an error below.
            self.validate_type(x, fieldname, schema, disallow)
        except ValidationError:
            return
        self._error("Value %(value)r of type %(disallow)s is disallowed for "
                    "field '%(fieldname)s'",
                    x.get(fieldname), fieldname, disallow=disallow)
    def validate(self, data, schema):
        '''
        Validates a piece of json data against the provided json-schema.
        Raises ValidationError/FieldValidationError on invalid data and
        SchemaError on a malformed schema.
        '''
        self._validate(data, schema)
    def _validate(self, data, schema):
        # Wrap the data in a synthetic one-key object so every checker can
        # operate uniformly on (container, fieldname) pairs.
        self.__validate("_data", {"_data": data}, schema)
    def __validate(self, fieldname, data, schema):
        # Core dispatcher: applies every schema keyword to data[fieldname] by
        # looking up a matching validate_<keyword> method.
        if schema is not None:
            if not isinstance(schema, dict):
                raise SchemaError(
                    "Type for field '%s' must be 'dict', got: '%s'" %
                    (fieldname, type(schema).__name__))
            # Work on a shallow copy so defaults can be injected without
            # mutating the caller's schema.
            newschema = copy.copy(schema)
            if 'optional' in schema:
                raise SchemaError('The "optional" attribute has been replaced'
                                  ' by "required"')
            if 'requires' in schema:
                raise SchemaError('The "requires" attribute has been replaced'
                                  ' by "dependencies"')
            # Fill in instance-level defaults for required/blank.
            if 'required' not in schema:
                newschema['required'] = self.required_by_default
            if 'blank' not in schema:
                newschema['blank'] = self.blank_by_default
            for schemaprop in newschema:
                validatorname = "validate_" + schemaprop
                # Unknown schema keywords are silently ignored.
                validator = getattr(self, validatorname, None)
                if validator:
                    validator(data, fieldname, schema,
                              newschema.get(schemaprop))
            if self.apply_default_to_data and 'default' in schema:
                try:
                    # The default itself must satisfy the declared type.
                    self.validate_type(
                        x={'_ds': schema['default']},
                        fieldname='_ds',
                        schema=schema,
                        fieldtype=schema['type'] if 'type' in schema else None
                    )
                except FieldValidationError as exc:
                    raise SchemaError(exc)
                # Inject the default only when the field is absent.
                if not fieldname in data:
                    data[fieldname] = schema['default']
        return data
__all__ = ['SchemaValidator', 'FieldValidationError']
| agpl-3.0 |
shlomozippel/ansible | plugins/inventory/cobbler.py | 8 | 4294 | #!/usr/bin/python
"""
Cobbler external inventory script
=================================
Ansible has a feature where instead of reading from /etc/ansible/hosts
as a text file, it can query external programs to obtain the list
of hosts, groups the hosts are in, and even variables to assign to each host.
To use this, copy this file over /etc/ansible/hosts and chmod +x the file.
This, more or less, allows you to keep one central database containing
info about all of your managed instances.
This script is an example of sourcing that data from Cobbler
(http://cobbler.github.com). With cobbler each --mgmt-class in cobbler
will correspond to a group in Ansible, and --ks-meta variables will be
passed down for use in templates or even in argument lines.
NOTE: The cobbler system names will not be used. Make sure a
cobbler --dns-name is set for each cobbler system. If a system
appears with two DNS names we do not add it twice because we don't want
ansible talking to it twice. The first one found will be used. If no
--dns-name is set the system will NOT be visible to ansible. We do
not add cobbler system names because there is no requirement in cobbler
that those correspond to addresses.
See http://ansible.github.com/api.html for more info
Tested with Cobbler 2.0.11.
"""
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
import sys
import xmlrpclib
import shlex
try:
import json
except:
import simplejson as json
# NOTE -- this file assumes Ansible is being accessed FROM the cobbler
# server, so it does not attempt to login with a username and password.
# this will be addressed in a future version of this script.
# NOTE(review): this is Python 2 code (print statements, iteritems,
# xmlrpclib) and will not run unmodified on Python 3.
conn = xmlrpclib.Server("http://127.0.0.1/cobbler_api", allow_none=True)
###################################################
# executed with no parameters, return the list of
# all groups and hosts
if len(sys.argv) == 2 and (sys.argv[1] == '--list'):
    systems = conn.get_item_names('system')
    # Every mgmt-class becomes an Ansible group; 'ungrouped' always exists.
    groups = { 'ungrouped' : [] }
    for system in systems:
        data = conn.get_blended_data(None, system)
        dns_name = None
        interfaces = data['interfaces']
        # NOTE(review): this loop keeps the *last* interface's dns_name,
        # although the module docstring says the first one found is used --
        # confirm which is intended.
        for (iname, ivalue) in interfaces.iteritems():
            this_dns_name = ivalue.get('dns_name', None)
            if this_dns_name is not None:
                dns_name = this_dns_name
        # Systems without a --dns-name are invisible to Ansible by design.
        if dns_name is None:
            continue
        classes = data['mgmt_classes']
        for cls in classes:
            if cls not in groups:
                groups[cls] = []
            # hostname is not really what we want to insert, really insert the
            # first DNS name but no further DNS names
            groups[cls].append(dns_name)
    print json.dumps(groups)
    sys.exit(0)
#####################################################
# executed with a hostname as a parameter, return the
# variables for that host
elif len(sys.argv) == 3 and (sys.argv[1] == '--host'):
    # look up the system record for the given DNS name
    result = conn.find_system_by_dns_name(sys.argv[2])
    system = result.get('name', None)
    data = {}
    if system is None:
        # Unknown host: emit an empty variable dict and signal failure.
        print json.dumps({})
        sys.exit(1)
    data = conn.get_system_for_koan(system)
    # return the ksmeta data for that system
    metadata = data['ks_meta']
    # ks_meta is a shell-like "k=v k2=v2" string; parse it into a dict.
    tokens = shlex.split(metadata)
    results = {}
    for t in tokens:
        if t.find("=") != -1:
            (k,v) = t.split("=",1)
            results[k]=v
    print json.dumps(results)
    sys.exit(0)
else:
    print "usage: --list ..OR.. --host <hostname>"
    sys.exit(1)
| gpl-3.0 |
amohanta/thug | src/DOM/Sidebar.py | 9 | 1718 | #!/usr/bin/env python
#
# Sidebar.py
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
import logging
log = logging.getLogger("Thug")
class Sidebar(object):
    """Emulation of the browser sidebar/external object, recording search
    providers, search engines, panels and microsummary generators that
    analyzed pages try to register."""
    def __init__(self):
        self._providers = set()
        self._engines = set()
        self._favorites = set()
        self._generators = set()
    def addMicrosummaryGenerator(self, generatorURL):
        self._generators.add(generatorURL)
    def addPanel(self, title, URL, customizeURL):
        self._favorites.add((title, URL, customizeURL))
    def addPersistentPanel(self, title, URL, customizeURL):
        # Persistent panels are tracked together with regular ones.
        self._favorites.add((title, URL, customizeURL))
    def addSearchEngine(self, engineURL, iconURL, message, suggestedCategory):
        # Fixed: previously wrote to the non-existent attribute
        # "self._enginess", raising AttributeError on every call.
        self._engines.add((engineURL, iconURL, message, suggestedCategory))
    def AddSearchProvider(self, URL):
        self._providers.add(URL)
    def IsSearchProviderInstalled(self, URL):
        if URL in self._providers:
            return 1 # A matching search provider is installed, but it is not the default.
        return 0 # No installed search provider was found with the specified prefix
| gpl-2.0 |
vipul-sharma20/oh-mainline | mysite/customs/views.py | 15 | 14664 | # This file is part of OpenHatch.
# Copyright (C) 2010, 2011 Jack Grigg
# Copyright (C) 2009 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.http import HttpResponseRedirect, Http404
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
import django.shortcuts
from reversion import revision
import mysite.base.decorators
import mysite.customs.forms
import mysite.customs.models
# Trivial helper to avoid repeated exception handling
def get_tracker_model_or_404(tracker_model_name):
    # Maps a tracker-type name (e.g. 'bugzilla', 'trac') to its model class,
    # turning an unknown name into an HTTP 404.
    try:
        return mysite.customs.models.TrackerModel.get_by_name(tracker_model_name)
    except ValueError:
        raise Http404
# Lists all the stored trackers of a selected type (Bugzilla, Trac etc.)
@mysite.base.decorators.view
def list_trackers(request, tracker_types_form=None):
    """Show all trackers of the type selected in the POSTed form
    (defaulting to 'bugzilla'), plus any one-shot notification message
    selected via the ?notification_id= query parameter."""
    data = {}
    if request.POST:
        tracker_types_form = mysite.customs.forms.TrackerTypesForm(
            request.POST, prefix='list_trackers')
        if tracker_types_form.is_valid():
            tracker_type = tracker_types_form.cleaned_data['tracker_type']
        else:
            # Invalid form falls back to the default tracker type.
            tracker_type = 'bugzilla'
    else:
        tracker_type = 'bugzilla'
    trackers = get_tracker_model_or_404(tracker_type).all_trackers.all()
    data['tracker_type'] = tracker_type
    data['trackers'] = trackers
    # Flash-style messages keyed by the notification_id query parameter.
    notification_id = request.GET.get('notification_id', None)
    notifications = {
        'add-success': 'Bugtracker successfully added! Bugs from this tracker should start appearing within 24 hours.',
        'edit-success': 'Bugtracker successfully edited! New settings should take effect within 24 hours.',
        'delete-success': 'Bugtracker successfully deleted!',
        'tracker-existence-fail': 'Hmm, could not find the requested tracker.',
        'tracker-url-existence-fail': 'Hmm, could not find the requested tracker URL.',
    }
    data['customs_notification'] = notifications.get(notification_id, '')
    if tracker_types_form is None:
        tracker_types_form = mysite.customs.forms.TrackerTypesForm(
            prefix='list_trackers')
    data['tracker_types_form'] = tracker_types_form
    return (request, 'customs/list_trackers.html', data)
@login_required
def add_tracker(request, tracker_type, tracker_form=None):
    """Render the blank 'add a tracker' form for *tracker_type*.
    A ?project_id= query parameter pre-selects the project the tracker is
    created for; *tracker_form* carries a bound form back after a failed
    submission."""
    data = {}
    tracker_model = get_tracker_model_or_404(tracker_type)
    data['tracker_type_pretty'] = tracker_model.namestr
    data['action_url'] = reverse(add_tracker_do, args=[tracker_type])
    if tracker_form is None:
        # This is what we'll pass in to the form. By default: blank.
        initial_data = {}
        # If the user passed in a ?project_id= value, then store that in
        # the form's created_for_project values.
        project_id = request.GET.get('project_id', None)
        if project_id is not None:
            try:
                project = mysite.search.models.Project.objects.get(
                    id=project_id)
                initial_data['created_for_project'] = project
            except mysite.search.models.Project.DoesNotExist:
                pass  # no biggie
        tracker_form = tracker_model.get_form()(
            prefix='add_tracker',
            initial=initial_data)
    data['tracker_form'] = tracker_form
    return mysite.base.decorators.as_view(request, 'customs/add_tracker.html', data, None)
@login_required
@revision.create_on_success
def add_tracker_do(request, tracker_type):
    """Handle the POST from add_tracker: save the new tracker (recording a
    revision) and redirect to the add-URL step, or re-render the form with
    errors."""
    tracker_model = get_tracker_model_or_404(tracker_type)
    tracker_form = tracker_model.get_form()(
        request.POST, prefix='add_tracker')
    if tracker_form.is_valid():
        tracker_name = tracker_form.cleaned_data['tracker_name']
        # Tracker form is valid, so save away!
        tracker = tracker_form.save()
        # Set the revision meta data.
        revision.user = request.user
        revision.comment = 'Added the %s tracker' % tracker_name
        # Send them off to add some URLs
        return HttpResponseRedirect(reverse(add_tracker_url, args=[tracker_type, tracker.id, tracker_name]))
    else:
        # Invalid: re-display the add form with the bound (error) form.
        return add_tracker(request,
                           tracker_type=tracker_type,
                           tracker_form=tracker_form)
@login_required
def add_tracker_url(request, tracker_type, tracker_id, tracker_name, url_form=None):
    """Render the 'add a URL to this tracker' form.  Tracker types without a
    URL form skip straight back to the tracker list."""
    data = {}
    tracker_model = get_tracker_model_or_404(tracker_type)
    if url_form or tracker_model.get_urlform():
        if url_form is None:
            try:
                # Pre-bind the URL object to its parent tracker.
                tracker_obj = mysite.customs.models.TrackerModel.get_instance_by_id(
                    tracker_id)
                url_obj = tracker_model.get_urlmodel()(
                    tracker=tracker_obj)
                url_form = tracker_model.get_urlform()(
                    instance=url_obj, prefix='add_tracker_url')
            except tracker_model.DoesNotExist:
                # Fall back to an unbound form when the tracker is missing.
                url_form = tracker_model.get_urlform()(
                    prefix='add_tracker_url')
        data['url_form'] = url_form
        data['tracker_name'] = tracker_name
        data['tracker_id'] = tracker_id
        data['cancel_url'] = reverse(edit_tracker, args=[
                                     tracker_type, tracker_id, tracker_name])
        data['add_more_url'] = reverse(add_tracker_url_do, args=[
                                       tracker_type, tracker_id, tracker_name])
        # Same endpoint; ?finished=true tells the POST handler to stop looping.
        data['finish_url'] = reverse(add_tracker_url_do, args=[
                                     tracker_type, tracker_id, tracker_name])
        data['finish_url'] += '?finished=true'
        return mysite.base.decorators.as_view(request, 'customs/add_tracker_url.html', data, None)
    else:
        return HttpResponseRedirect(reverse(list_trackers))
@login_required
@revision.create_on_success
def add_tracker_url_do(request, tracker_type, tracker_id, tracker_name):
    """Handle the POST from add_tracker_url: save the URL (recording a
    revision), then either loop back for another URL or finish."""
    url_form = None
    tracker_model = get_tracker_model_or_404(tracker_type)
    tracker_obj = mysite.customs.models.TrackerModel.get_instance_by_id(
        tracker_id)
    url_obj = tracker_model.get_urlmodel()(
        tracker=tracker_obj)
    url_form = tracker_model.get_urlform()(
        request.POST, instance=url_obj, prefix='add_tracker_url')
    if url_form.is_valid():
        # It's valid so save it!
        url_form.save()
        # Set the revision meta data.
        revision.user = request.user
        revision.comment = 'Added URL to the %s tracker' % tracker_name
        # Do they want to add another URL?
        if request.GET.get('finished', None) == 'true':
            return HttpResponseRedirect(reverse(list_trackers) +
                                        '?notification_id=add-success')
        else:
            return HttpResponseRedirect(reverse(add_tracker_url, args=[tracker_type, tracker_id, tracker_name]))
    else:
        # Invalid: re-render the URL form with errors.
        return add_tracker_url(
            request,
            tracker_id=tracker_id,
            tracker_type=tracker_type,
            tracker_name=tracker_name,
            url_form=url_form)
@login_required
def edit_tracker(request, tracker_type, tracker_id, tracker_name, tracker_form=None):
    """Render the edit form for a tracker, along with its associated URLs."""
    data = {}
    tracker_model = get_tracker_model_or_404(tracker_type)
    # NOTE(review): get_tracker_model_or_404 raises Http404 on failure, so
    # the else branch below looks unreachable unless get_by_name can return
    # a falsy value -- confirm against TrackerModel.get_by_name.
    if tracker_model:
        try:
            tracker_obj = tracker_model.all_trackers.get(
                pk=tracker_id, tracker_name=tracker_name)
            if tracker_form is None:
                tracker_form = tracker_model.get_form()(
                    instance=tracker_obj, prefix='edit_tracker')
                # Set the initial value for github_url field
                if tracker_type == 'github':
                    tracker_form.initial[
                        'github_url'] = tracker_form.instance.get_github_url()
            tracker_urlmodel = tracker_model.get_urlmodel()
            tracker_urlform = tracker_model.get_urlform()
            if tracker_urlmodel:
                tracker_urls = tracker_urlmodel.objects.filter(
                    tracker=tracker_obj)
            else:
                # Tracker types without a URL model show no URL list.
                tracker_urls = []
        except tracker_model.DoesNotExist:
            return HttpResponseRedirect(reverse(list_trackers) +
                                        '?notification_id=tracker-existence-fail')
        data['tracker_name'] = tracker_name
        data['tracker_id'] = tracker_id
        data['tracker_type'] = tracker_type
        data['tracker_form'] = tracker_form
        data['tracker_urls'] = tracker_urls
        data['tracker_urlmodel'] = tracker_urlmodel
        if tracker_urlform:
            data['tracker_urlform'] = tracker_urlform()
        return mysite.base.decorators.as_view(request, 'customs/edit_tracker.html', data, None)
    else:
        return HttpResponseRedirect(reverse(list_trackers))
@login_required
@revision.create_on_success
def edit_tracker_do(request, tracker_type, tracker_id, tracker_name):
    """Handle the POST from edit_tracker: save the edited tracker (recording
    a revision) or re-render the form with errors."""
    tracker_model = get_tracker_model_or_404(tracker_type)
    tracker_obj = tracker_model.all_trackers.get(
        pk=tracker_id, tracker_name=tracker_name)
    tracker_form = tracker_model.get_form()(
        request.POST, instance=tracker_obj, prefix='edit_tracker')
    if tracker_form.is_valid():
        tracker_form.save()
        # Set the revision meta data.
        revision.user = request.user
        revision.comment = 'Edited the %s tracker' % tracker_name
        return HttpResponseRedirect(reverse(list_trackers) +
                                    '?notification_id=edit-success')
    else:
        # Invalid: re-render the edit page with the bound (error) form.
        return edit_tracker(request,
                            tracker_type=tracker_type,
                            tracker_id=tracker_id,
                            tracker_name=tracker_name,
                            tracker_form=tracker_form)
@login_required
def edit_tracker_url(request, tracker_type, tracker_id, tracker_name, url_id, url_form=None):
    """Render the edit form for one URL record belonging to a tracker.

    ``url_form`` lets edit_tracker_url_do re-render this page with a bound,
    invalid form.
    """
    data = {}
    tracker_model = get_tracker_model_or_404(tracker_type)
    try:
        url_obj = tracker_model.get_urlmodel().objects.get(id=url_id)
        if url_form is None:
            url_form = tracker_model.get_urlform()(
                instance=url_obj, prefix='edit_tracker_url')
    except tracker_model.get_urlmodel().DoesNotExist:
        return HttpResponseRedirect(
            reverse(list_trackers) +
            '?notification_id=tracker-url-existence-fail')
    data['tracker_name'] = tracker_name
    data['tracker_id'] = tracker_id
    data['tracker_type'] = tracker_type
    data['url_id'] = url_id
    data['url_form'] = url_form
    # "Cancel" returns to the parent tracker's edit page.
    data['cancel_url'] = reverse(
        edit_tracker, args=[tracker_type, tracker_id, tracker_name])
    return mysite.base.decorators.as_view(request, 'customs/edit_tracker_url.html', data, None)
@login_required
@revision.create_on_success
def edit_tracker_url_do(request, tracker_type, tracker_id, tracker_name, url_id):
    """Apply a POSTed edit to one tracker URL record.

    On success, records revision metadata and redirects back to the
    tracker's edit page; on validation failure, re-renders the URL edit
    form with the bound form's errors.
    """
    tracker_model = get_tracker_model_or_404(tracker_type)
    # Bug fix: fetch the URL record from the tracker's *URL model*, not the
    # tracker model itself -- matching edit_tracker_url and
    # delete_tracker_url_do.  The old code queried tracker_model.objects,
    # which looked up the wrong table and fed a tracker instance to the
    # URL form.
    url_obj = tracker_model.get_urlmodel().objects.get(id=url_id)
    url_form = tracker_model.get_urlform()(
        request.POST, instance=url_obj, prefix='edit_tracker_url')
    if url_form.is_valid():
        url_form.save()
        # Set the revision meta data.
        revision.user = request.user
        revision.comment = 'Edited URL for the %s tracker' % tracker_name
        return HttpResponseRedirect(reverse(edit_tracker, args=[tracker_type, tracker_id, tracker_name]) +
                                    '?edit-url-success')
    else:
        return edit_tracker_url(request,
                                tracker_type=tracker_type,
                                tracker_id=tracker_id,
                                tracker_name=tracker_name,
                                url_id=url_id,
                                url_form=url_form)
@login_required
def delete_tracker(request, tracker_type, tracker_id, tracker_name):
    """Confirmation page for deleting a tracker; a POST performs the delete."""
    tracker_model = get_tracker_model_or_404(tracker_type)
    tracker = django.shortcuts.get_object_or_404(
        tracker_model.all_trackers,
        pk=tracker_id, tracker_name=tracker_name)
    if request.method == 'POST':
        return delete_tracker_do(
            request, tracker_type, tracker.id, tracker.tracker_name)
    # Else...
    data = {}
    data['tracker_name'] = tracker_name
    data['tracker_type'] = tracker_type
    data['tracker_id'] = tracker_id
    return mysite.base.decorators.as_view(request, 'customs/delete_tracker.html', data, None)
@login_required
@revision.create_on_success
def delete_tracker_do(request, tracker_type, tracker_id, tracker_name):
    """Delete the tracker, record revision metadata, and redirect to the list."""
    tracker_model = get_tracker_model_or_404(tracker_type)
    tracker = tracker_model.all_trackers.get(
        pk=tracker_id, tracker_name=tracker_name)
    tracker.delete()
    # Set the revision meta data.
    revision.user = request.user
    revision.comment = 'Deleted the %s tracker' % tracker_name
    # Tell them it worked.
    return HttpResponseRedirect(reverse(list_trackers) +
                                '?notification_id=delete-success')
@login_required
def delete_tracker_url(request, tracker_type, tracker_id, tracker_name, url_id):
    """Confirmation page for deleting one URL record from a tracker."""
    data = {}
    tracker_model = get_tracker_model_or_404(tracker_type)
    # NOTE(review): a bad url_id raises DoesNotExist (HTTP 500) here,
    # unlike delete_tracker which uses get_object_or_404 -- confirm intended.
    url_obj = tracker_model.get_urlmodel().objects.get(id=url_id)
    data['tracker_name'] = tracker_name
    data['tracker_id'] = tracker_id
    data['tracker_type'] = tracker_type
    data['url_id'] = url_id
    data['url'] = url_obj.url
    return mysite.base.decorators.as_view(request, 'customs/delete_tracker_url.html', data, None)
@login_required
@revision.create_on_success
def delete_tracker_url_do(request, tracker_type, tracker_id, tracker_name, url_id):
    """Delete one tracker URL and return to the tracker's edit page."""
    tracker_model = get_tracker_model_or_404(tracker_type)
    url_obj = tracker_model.get_urlmodel().objects.get(id=url_id)
    url_obj.delete()
    # Set the revision meta data.
    revision.user = request.user
    revision.comment = 'Deleted URL from the %s tracker' % tracker_name
    # Tell them it worked.
    return HttpResponseRedirect(reverse(edit_tracker, args=[tracker_type, tracker_id, tracker_name]) +
                                '?delete-url-success')
| agpl-3.0 |
colinbrislawn/bioconda-recipes | recipes/phylip/phylip.py | 48 | 8900 | #!/usr/bin/env python
#
# Wrapper script for phylip program when installed from
# bioconda. Adapted from shell scripts provided in the biolbuilds
# conda recipe by Cheng H. Lee.
import sys
import os
import subprocess
def main():
    """Dispatch to the PHYLIP executable named on the command line.

    ``phylip <program>`` runs ``<program>`` from this script's directory;
    ``phylip test`` runs the hidden installation self-test.  With no
    argument, or an unknown program name, usage help and the list of
    available programs are printed and the process exits with status 1.

    Fixes over the original: stray debug prints ("running main", argv,
    sharedir) removed, and the unknown-program branch no longer calls the
    broken zero-argument ``usage()`` (which raised NameError on an
    undefined global ``bindir``) -- the help text is printed inline.
    """
    bindir = get_script_path(sys.argv[0])
    # Directory that actually holds the PHYLIP executables; realpath via an
    # installed binary resolves a possible symlink farm.
    sharedir = get_script_path(bindir + "/dnapars")
    if len(sys.argv) == 1:
        print("Usage: {prog} <program>".format(prog=sys.argv[0]))
        print("Existing programs are: {progs}".format(progs=os.listdir(sharedir)))
        sys.exit(1)
    progname = sys.argv[1]
    program = bindir + "/" + progname
    if progname == "test":  # hidden test of conda phylip installation
        test(bindir)
    elif os.path.isfile(program):
        subprocess.check_call(program)
    else:
        print("{prog} does not exist in Phylip".format(prog=progname))
        print("Usage: {prog} <program>".format(prog=sys.argv[0]))
        print("Existing programs are: {progs}".format(progs=os.listdir(sharedir)))
        sys.exit(1)
def usage(bindir=None):
    """Print command usage and the list of available PHYLIP programs.

    The original referenced an undefined global ``bindir`` and therefore
    raised NameError whenever it was called.  ``bindir`` is now an optional
    argument (backward compatible: ``usage()`` still works) defaulting to
    the directory containing this script, symlinks resolved.
    """
    if bindir is None:
        bindir = os.path.dirname(os.path.realpath(sys.argv[0]))
    print("Usage: {prog} <program>".format(prog=sys.argv[0]))
    print("Existing programs are: {progs}".format(progs=os.listdir(bindir)))
def get_script_path(script):
    """Return the directory that contains *script*, with symlinks resolved."""
    resolved = os.path.realpath(script)
    return os.path.dirname(resolved)
# Main function for testing the conda installation of phylip.
# It simply checks that each phylip program can process an input file
# without returning an error code.
def test(bindir):
    """Smoke-test every PHYLIP program in *bindir* against bundled data.

    For each family of programs, writes the matching dataset from
    ``infiles`` to ``infile`` (or ``intree`` for the drawing programs) in
    the current directory, then drives the program via testprog() with
    canned menu keystrokes.
    """
    # "0\ny\n": menu keystrokes -- terminal type 0, then accept settings.
    params = "0\ny\n"
    out = open("infile", "wt")
    out.write(infiles["testdna"])
    out.close()
    for prog in ["dnapars","dnaml","dnadist","dnapenny","dnacomp","dnamlk"]: #,"dnainvar"
        testprog(prog, bindir,params)
    out = open("infile", "wt")
    out.write(infiles["testprot"])
    out.close()
    for prog in ["protpars","protdist","proml","promlk"]:
        testprog(prog, bindir, params)
    out = open("infile", "wt")
    out.write(infiles["testdisc"])
    out.close()
    for prog in ["pars","penny","dollop","dolpenny","clique","mix"]:
        testprog(prog, bindir,params)
    out = open("infile", "wt")
    out.write(infiles["testrest"])
    out.close()
    for prog in ["restml","restdist"]:
        testprog(prog, bindir, params)
    out = open("infile", "wt")
    out.write(infiles["testdist"])
    out.close()
    for prog in ["fitch","kitsch","neighbor"]:
        testprog(prog, bindir,params)
    out = open("intree", "wt")
    out.write(infiles["testtree"])
    out.close()
    for prog in ["drawtree", "drawgram"]:
        # Drawing programs need extra keystrokes before the confirmation.
        params = "0\nl\nm\ny\n"
        testprog(prog, bindir,params)
    # testing the java gui versions require user interaction
    # Not good for automatic istallations -- comment out for now,
    # but keep for debug?
    ''' for prog in ["drawtree_gui", "drawgram_gui"]:
        print("testing " + prog)
        program = bindir+"/"+prog
        outfile = open(prog+".out",'wt')
        try:
            subprocess.run(program, universal_newlines=True,input=params,stdout=outfile, stderr=subprocess.PIPE, check=True)
        except subprocess.CalledProcessError as e:
            print(e)
            subprocess.call(["cat", prog+".out"], shell=True)
            raise
        print("passed; cleaning up")
        subprocess.call(["rm", "-f", "infile","plotfile.ps"])'''
# Helper for test(): run a single PHYLIP program from the conda install.
def testprog(prog, bindir, params):
    """Run ``bindir/prog``, feeding *params* to its menu on stdin.

    The program's combined stdout/stderr is captured in ``<prog>.out``;
    leftover PHYLIP output files are removed afterwards.
    """
    print("testing " + prog + "...",)
    binary = bindir + "/" + prog
    log = open(prog + ".out", 'wt')
    try:
        child = subprocess.Popen(binary, stdin=subprocess.PIPE,
                                 stdout=log, stderr=subprocess.STDOUT,
                                 universal_newlines=True)
        child.communicate(input=params)
    except subprocess.CalledProcessError as err:
        # On failure, show the captured log before re-raising.
        print(err)
        subprocess.call(["cat", prog + ".out"])
        raise
    print("passed; cleaning up")
    subprocess.call(["rm", "-f", "outtree", "outfile", "plotfile"])
# Content of test files for testing the conda installation of phylip
infiles = {
"testdna" :
""" 7 232
Bovine CCAAACCTGT CCCCACCATC TAACACCAAC CCACATATAC AAGCTAAACC AAAAATACCA
Mouse CCAAAAAAAC ATCCAAACAC CAACCCCAGC CCTTACGCAA TAGCCATACA AAGAATATTA
Gibbon CTATACCCAC CCAACTCGAC CTACACCAAT CCCCACATAG CACACAGACC AACAACCTCC
Orang CCCCACCCGT CTACACCAGC CAACACCAAC CCCCACCTAC TATACCAACC AATAACCTCT
Gorilla CCCCATTTAT CCATAAAAAC CAACACCAAC CCCCATCTAA CACACAAACT AATGACCCCC
Chimp CCCCATCCAC CCATACAAAC CAACATTACC CTCCATCCAA TATACAAACT AACAACCTCC
Human CCCCACTCAC CCATACAAAC CAACACCACT CTCCACCTAA TATACAAATT AATAACCTCC
TACTACTAAA AACTCAAATT AACTCTTTAA TCTTTATACA ACATTCCACC AACCTATCCA
TACAACCATA AATAAGACTA ATCTATTAAA ATAACCCATT ACGATACAAA ATCCCTTTCG
CACCTTCCAT ACCAAGCCCC GACTTTACCG CCAACGCACC TCATCAAAAC ATACCTACAA
CAACCCCTAA ACCAAACACT ATCCCCAAAA CCAACACACT CTACCAAAAT ACACCCCCAA
CACCCTCAAA GCCAAACACC AACCCTATAA TCAATACGCC TTATCAAAAC ACACCCCCAA
CACTCTTCAG ACCGAACACC AATCTCACAA CCAACACGCC CCGTCAAAAC ACCCCTTCAG
CACCTTCAGA ACTGAACGCC AATCTCATAA CCAACACACC CCATCAAAGC ACCCCTCCAA
CACAAAAAAA CTCATATTTA TCTAAATACG AACTTCACAC AACCTTAACA CATAAACATA
TCTAGATACA AACCACAACA CACAATTAAT ACACACCACA ATTACAATAC TAAACTCCCA
CACAAACAAA TGCCCCCCCA CCCTCCTTCT TCAAGCCCAC TAGACCATCC TACCTTCCTA
TTCACATCCG CACACCCCCA CCCCCCCTGC CCACGTCCAT CCCATCACCC TCTCCTCCCA
CATAAACCCA CGCACCCCCA CCCCTTCCGC CCATGCTCAC CACATCATCT CTCCCCTTCA
CACAAATTCA TACACCCCTA CCTTTCCTAC CCACGTTCAC CACATCATCC CCCCCTCTCA
CACAAACCCG CACACCTCCA CCCCCCTCGT CTACGCTTAC CACGTCATCC CTCCCTCTCA
CCCCAGCCCA ACACCCTTCC ACAAATCCTT AATATACGCA CCATAAATAA CA
TCCCACCAAA TCACCCTCCA TCAAATCCAC AAATTACACA ACCATTAACC CA
GCACGCCAAG CTCTCTACCA TCAAACGCAC AACTTACACA TACAGAACCA CA
ACACCCTAAG CCACCTTCCT CAAAATCCAA AACCCACACA ACCGAAACAA CA
ACACCTCAAT CCACCTCCCC CCAAATACAC AATTCACACA AACAATACCA CA
ACATCTTGAC TCGCCTCTCT CCAAACACAC AATTCACGCA AACAACGCCA CA
ACACCTTAAC TCACCTTCTC CCAAACGCAC AATTCGCACA CACAACGCCA CA
""",
"testprot" :
""" 3 474
CAM ---TTETIQS NANLAPLPPH VPEHLVFDFD MYNPSN--LS AGVQEAWAVL
TERP ----MDARAT IPEHIARTVI LPQGYADDEV IYPAFK--WL RDEQPLAMAH
BM3 TIKEMPQPKT FGELKNLPLL NTDKPVQALM KIADELGEIF KFEAPGRVTR
QESNVPDLVW TRCNGG---H WIATRGQLIR EAY-EDYRHF SSECPFIPRE
IEGYDPMWIA TKHADV---M QIGKQPGLFS NAEGSEILYD QNNEAFMRSI
YLS-SQRLIK EACDESRFDK NLSQALKFVR DFAGDGLFTS WTHEKNWKKA
AGEAYDFIP- -TSMDPPEQR QFRALANQVV GMPVVDKLEN RIQELACSLI
SGGCPHVIDS LTSMDPPTHT AYRGLTLNWF QPASIRKLEE NIRRIAQASV
HNILLPSFS- -QQAMKGYHA MMVDIAVQLV QKWERLNADE HIEVPEDMTR
ESLR-PQGQC NFTEDYAEPF PIRIFMLLAG LPEEDIPHLK YLTDQMT---
QRLLDFDGEC DFMTDCALYY PLHVVMTALG VPEDDEPLML KLTQDFFGVH
LTLD-TIGLC GFNYRFNSFY RDQPHPFITS MVRALDEAMN KLQRANP--D
RPD------- ------GSMT FAEAKEALYD YLIPIIEQRR QKP--GTDAI
EPDEQAVAAP RQSADEAARR FHETIATFYD YFNGFTVDRR SCP--KDDVM
DPAYD----- -----ENKRQ FQEDIKVMND LVDKIIADRK ASGEQSDDLL
SIVANGQVN- -GRPITSDEA KRMCGLLLVG GLDTVVNFLS FSMEFLAKSP
SLLANSKLD- -GNYIDDKYI NAYYVAIATA GHDTTSSSSG GAIIGLSRNP
THMLNGKDPE TGEPLDDENI RYQIITFLIA GHETTSGLLS FALYFLVKNP
EHRQELIERP E--------- --------RI PAACEELLRR FS-LVADGRI
EQLALAKSDP A--------- --------LI PRLVDEAVRW TAPVKSFMRT
HVLQKAAEEA ARVLVDPVPS YKQVKQLKYV GMVLNEALRL WPTAPAFSLY
LTSDYEFHGV Q-LKKGDQIL LPQMLSGLDE REN-ACPMHV DFSRQK----
ALADTEVRGQ N-IKRGDRIM LSYPSANRDE EVF-SNPDEF DITRFP----
AKEDTVLGGE YPLEKGDELM VLIPQLHRDK TIWGDDVEEF RPERFENPSA
---VSHTTFG HGSHLCLGQH LARREIIVTL KEWLTRIPDF SIAPGAQIQH
---NRHLGFG WGAHMCLGQH LAKLEMKIFF EELLPKLKSV ELS-GPPRLV
IPQHAFKPFG NGQRACIGQQ FALHEATLVL GMMLKHFDFE DHT-NYELDI
KSGIVSGVQA LPLVWDPATT KAV-
ATNFVGGPKN VPIRFTKA-- ----
KETLTLKPEG FVVKAKSKKI PLGG
""",
"testdisc" :
""" 3 10
CAM 0000000000
TERP 0000011111
BM3 0001111111
""",
"testrest" :
""" 5 13 2
Alpha ++-+-++--+++-
Beta ++++--+--+++-
Gamma -+--+-++-+-++
Delta ++-+----++---
Epsilon ++++----++---
""",
"testdist" :
""" 7
Bovine 0.0000 1.2385 1.3472 1.2070 1.0857 1.2832 1.2402
Mouse 1.2385 0.0000 1.1231 1.0966 1.1470 1.2157 1.1530
Gibbon 1.3472 1.1231 0.0000 0.5924 0.5077 0.5466 0.5001
Orang 1.2070 1.0966 0.5924 0.0000 0.3857 0.4405 0.4092
Gorilla 1.0857 1.1470 0.5077 0.3857 0.0000 0.3170 0.2817
Chimp 1.2832 1.2157 0.5466 0.4405 0.3170 0.0000 0.2570
Human 1.2402 1.1530 0.5001 0.4092 0.2817 0.2570 0.0000
""",
"testtree" :
"((BM3,TERP),CAM);"
}
# Run main() only when executed as a script.  The original printed debug
# text ("Starting main") before dispatching and, worse, printed profanity
# unconditionally whenever the module was *imported*; importing must have
# no side effects.
if __name__ == "__main__":
    main()
| mit |
eeshangarg/oh-mainline | mysite/missions/pipvirtualenv/forms.py | 15 | 1104 | # This file is part of OpenHatch.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import django.forms
class PipFreezeOutputForm(django.forms.Form):
    """Mission step form: collects the learner's pasted ``pip freeze`` output."""
    pipfreeze_output = django.forms.CharField(
        error_messages={'required': 'No pip freeze output was given.'}, widget=django.forms.Textarea())
class PipListOutputForm(django.forms.Form):
    """Mission step form: collects the learner's pasted ``pip list`` output."""
    piplist_output = django.forms.CharField(
        error_messages={'required': 'No pip list output was given.'}, widget=django.forms.Textarea())
| agpl-3.0 |
globau/servo | tests/wpt/css-tests/tools/pytest/testing/test_assertion.py | 170 | 19078 | # -*- coding: utf-8 -*-
import sys
import textwrap
import _pytest.assertion as plugin
import _pytest._code
import py
import pytest
from _pytest.assertion import reinterpret
from _pytest.assertion import util
PY3 = sys.version_info >= (3, 0)
@pytest.fixture
def mock_config():
    """Fixture: minimal stand-in for pytest's config; only 'verbose' is modelled."""
    class Config(object):
        verbose = False
        def getoption(self, name):
            if name == 'verbose':
                return self.verbose
            # Fail loudly on any option this mock does not model.
            raise KeyError('Not mocked out: %s' % name)
    return Config()
def interpret(expr):
    """Reinterpret *expr* in the caller's frame via pytest's assertion reinterpreter."""
    return reinterpret.reinterpret(expr, _pytest._code.Frame(sys._getframe(1)))
class TestBinReprIntegration:
    """Check that the pytest_assertrepr_compare hook fires on failing asserts."""
    def test_pytest_assertrepr_compare_called(self, testdir):
        # The conftest hook records every (op, left, right) it sees; the
        # second test then asserts the first test's failure was recorded.
        testdir.makeconftest("""
            l = []
            def pytest_assertrepr_compare(op, left, right):
                l.append((op, left, right))
            def pytest_funcarg__l(request):
                return l
        """)
        testdir.makepyfile("""
            def test_hello():
                assert 0 == 1
            def test_check(l):
                assert l == [("==", 0, 1)]
        """)
        result = testdir.runpytest("-v")
        result.stdout.fnmatch_lines([
            "*test_hello*FAIL*",
            "*test_check*PASS*",
        ])
def callequal(left, right, verbose=False):
    """Invoke the '==' comparison-explanation hook with a mocked config."""
    config = mock_config()
    config.verbose = verbose
    return plugin.pytest_assertrepr_compare(config, '==', left, right)
class TestAssert_reprcompare:
    """Unit tests for the '==' comparison explanations built by callequal().

    Fixes over the original: two misspelled test method names corrected
    (test_list_different_lenghts -> test_list_different_lengths,
    test_frozenzet -> test_frozenset); no behavior change -- pytest
    discovers tests by name and nothing else referenced them.
    """
    def test_different_types(self):
        assert callequal([0, 1], 'foo') is None
    def test_summary(self):
        summary = callequal([0, 1], [0, 2])[0]
        assert len(summary) < 65
    def test_text_diff(self):
        diff = callequal('spam', 'eggs')[1:]
        assert '- spam' in diff
        assert '+ eggs' in diff
    def test_text_skipping(self):
        lines = callequal('a'*50 + 'spam', 'a'*50 + 'eggs')
        assert 'Skipping' in lines[1]
        for line in lines:
            assert 'a'*50 not in line
    def test_text_skipping_verbose(self):
        lines = callequal('a'*50 + 'spam', 'a'*50 + 'eggs', verbose=True)
        assert '- ' + 'a'*50 + 'spam' in lines
        assert '+ ' + 'a'*50 + 'eggs' in lines
    def test_multiline_text_diff(self):
        left = 'foo\nspam\nbar'
        right = 'foo\neggs\nbar'
        diff = callequal(left, right)
        assert '- spam' in diff
        assert '+ eggs' in diff
    def test_list(self):
        expl = callequal([0, 1], [0, 2])
        assert len(expl) > 1
    @pytest.mark.parametrize(
        ['left', 'right', 'expected'], [
            ([0, 1], [0, 2], """
            Full diff:
            - [0, 1]
            ?     ^
            + [0, 2]
            ?     ^
            """),
            ({0: 1}, {0: 2}, """
            Full diff:
            - {0: 1}
            ?     ^
            + {0: 2}
            ?     ^
            """),
            (set([0, 1]), set([0, 2]), """
            Full diff:
            - set([0, 1])
            ?         ^
            + set([0, 2])
            ?         ^
            """ if not PY3 else """
            Full diff:
            - {0, 1}
            ?     ^
            + {0, 2}
            ?     ^
            """)
        ]
    )
    def test_iterable_full_diff(self, left, right, expected):
        """Test the full diff assertion failure explanation.
        When verbose is False, then just a -v notice to get the diff is rendered,
        when verbose is True, then ndiff of the pprint is returned.
        """
        expl = callequal(left, right, verbose=False)
        assert expl[-1] == 'Use -v to get the full diff'
        expl = '\n'.join(callequal(left, right, verbose=True))
        assert expl.endswith(textwrap.dedent(expected).strip())
    def test_list_different_lengths(self):
        expl = callequal([0, 1], [0, 1, 2])
        assert len(expl) > 1
        expl = callequal([0, 1, 2], [0, 1])
        assert len(expl) > 1
    def test_dict(self):
        expl = callequal({'a': 0}, {'a': 1})
        assert len(expl) > 1
    def test_dict_omitting(self):
        lines = callequal({'a': 0, 'b': 1}, {'a': 1, 'b': 1})
        assert lines[1].startswith('Omitting 1 identical item')
        assert 'Common items' not in lines
        for line in lines[1:]:
            assert 'b' not in line
    def test_dict_omitting_verbose(self):
        lines = callequal({'a': 0, 'b': 1}, {'a': 1, 'b': 1}, verbose=True)
        assert lines[1].startswith('Common items:')
        assert 'Omitting' not in lines[1]
        assert lines[2] == "{'b': 1}"
    def test_set(self):
        expl = callequal(set([0, 1]), set([0, 2]))
        assert len(expl) > 1
    def test_frozenset(self):
        expl = callequal(frozenset([0, 1]), set([0, 2]))
        assert len(expl) > 1
    def test_Sequence(self):
        col = py.builtin._tryimport(
            "collections.abc",
            "collections",
            "sys")
        if not hasattr(col, "MutableSequence"):
            pytest.skip("cannot import MutableSequence")
        MutableSequence = col.MutableSequence
        class TestSequence(MutableSequence):  # works with a Sequence subclass
            def __init__(self, iterable):
                self.elements = list(iterable)
            def __getitem__(self, item):
                return self.elements[item]
            def __len__(self):
                return len(self.elements)
            def __setitem__(self, item, value):
                pass
            def __delitem__(self, item):
                pass
            def insert(self, item, index):
                pass
        expl = callequal(TestSequence([0, 1]), list([0, 2]))
        assert len(expl) > 1
    def test_list_tuples(self):
        expl = callequal([], [(1,2)])
        assert len(expl) > 1
        expl = callequal([(1,2)], [])
        assert len(expl) > 1
    def test_list_bad_repr(self):
        class A:
            def __repr__(self):
                raise ValueError(42)
        expl = callequal([], [A()])
        assert 'ValueError' in "".join(expl)
        expl = callequal({}, {'1': A()})
        assert 'faulty' in "".join(expl)
    def test_one_repr_empty(self):
        """
        the faulty empty string repr did trigger
        a unbound local error in _diff_text
        """
        class A(str):
            def __repr__(self):
                return ''
        expl = callequal(A(), '')
        assert not expl
    def test_repr_no_exc(self):
        expl = ' '.join(callequal('foo', 'bar'))
        assert 'raised in repr()' not in expl
    def test_unicode(self):
        left = py.builtin._totext('£€', 'utf-8')
        right = py.builtin._totext('£', 'utf-8')
        expl = callequal(left, right)
        assert expl[0] == py.builtin._totext("'£€' == '£'", 'utf-8')
        assert expl[1] == py.builtin._totext('- £€', 'utf-8')
        assert expl[2] == py.builtin._totext('+ £', 'utf-8')
    def test_nonascii_text(self):
        """
        :issue: 877
        non ascii python2 str caused a UnicodeDecodeError
        """
        class A(str):
            def __repr__(self):
                return '\xff'
        expl = callequal(A(), '1')
        assert expl
    def test_format_nonascii_explanation(self):
        assert util.format_explanation('λ')
    def test_mojibake(self):
        # issue 429
        left = 'e'
        right = '\xc3\xa9'
        if not isinstance(left, py.builtin.bytes):
            left = py.builtin.bytes(left, 'utf-8')
            right = py.builtin.bytes(right, 'utf-8')
        expl = callequal(left, right)
        for line in expl:
            assert isinstance(line, py.builtin.text)
        msg = py.builtin._totext('\n').join(expl)
        assert msg
class TestFormatExplanation:
    """Tests for util.format_explanation's rendering of explanation strings.

    In the raw explanation format, '{...}' introduces a nested "where"
    clause, '~' marks an escaped-newline continuation line, and '>' marks
    the start of the assert line after a custom message.
    """
    def test_special_chars_full(self, testdir):
        # Issue 453, for the bug this would raise IndexError
        testdir.makepyfile("""
            def test_foo():
                assert '\\n}' == ''
        """)
        result = testdir.runpytest()
        assert result.ret == 1
        result.stdout.fnmatch_lines([
            "*AssertionError*",
        ])
    def test_fmt_simple(self):
        expl = 'assert foo'
        assert util.format_explanation(expl) == 'assert foo'
    def test_fmt_where(self):
        expl = '\n'.join(['assert 1',
                          '{1 = foo',
                          '} == 2'])
        res = '\n'.join(['assert 1 == 2',
                         ' + where 1 = foo'])
        assert util.format_explanation(expl) == res
    def test_fmt_and(self):
        expl = '\n'.join(['assert 1',
                          '{1 = foo',
                          '} == 2',
                          '{2 = bar',
                          '}'])
        res = '\n'.join(['assert 1 == 2',
                         ' + where 1 = foo',
                         ' + and 2 = bar'])
        assert util.format_explanation(expl) == res
    def test_fmt_where_nested(self):
        expl = '\n'.join(['assert 1',
                          '{1 = foo',
                          '{foo = bar',
                          '}',
                          '} == 2'])
        res = '\n'.join(['assert 1 == 2',
                         ' + where 1 = foo',
                         ' + where foo = bar'])
        assert util.format_explanation(expl) == res
    def test_fmt_newline(self):
        expl = '\n'.join(['assert "foo" == "bar"',
                          '~- foo',
                          '~+ bar'])
        res = '\n'.join(['assert "foo" == "bar"',
                         ' - foo',
                         ' + bar'])
        assert util.format_explanation(expl) == res
    def test_fmt_newline_escaped(self):
        expl = '\n'.join(['assert foo == bar',
                          'baz'])
        res = 'assert foo == bar\\nbaz'
        assert util.format_explanation(expl) == res
    def test_fmt_newline_before_where(self):
        expl = '\n'.join(['the assertion message here',
                          '>assert 1',
                          '{1 = foo',
                          '} == 2',
                          '{2 = bar',
                          '}'])
        res = '\n'.join(['the assertion message here',
                         'assert 1 == 2',
                         ' + where 1 = foo',
                         ' + and 2 = bar'])
        assert util.format_explanation(expl) == res
    def test_fmt_multi_newline_before_where(self):
        expl = '\n'.join(['the assertion',
                          '~message here',
                          '>assert 1',
                          '{1 = foo',
                          '} == 2',
                          '{2 = bar',
                          '}'])
        res = '\n'.join(['the assertion',
                         ' message here',
                         'assert 1 == 2',
                         ' + where 1 = foo',
                         ' + and 2 = bar'])
        assert util.format_explanation(expl) == res
def test_python25_compile_issue257(testdir):
    """A trailing comment after a rewritten assert must not break compilation (issue 257)."""
    testdir.makepyfile("""
        def test_rewritten():
            assert 1 == 2
        # some comment
        """)
    result = testdir.runpytest()
    assert result.ret == 1
    result.stdout.fnmatch_lines("""
            *E*assert 1 == 2*
            *1 failed*
    """)
def test_rewritten(testdir):
    """Collected modules get assertion-rewritten (the @py_builtins marker global appears)."""
    testdir.makepyfile("""
        def test_rewritten():
            assert "@py_builtins" in globals()
    """)
    assert testdir.runpytest().ret == 0
def test_reprcompare_notin(mock_config):
    """'not in' failures point out where the needle occurs in the haystack."""
    detail = plugin.pytest_assertrepr_compare(
        mock_config, 'not in', 'foo', 'aaafoobbb')[1:]
    assert detail == ["'foo' is contained here:", ' aaafoobbb', '? +++']
def test_pytest_assertrepr_compare_integration(testdir):
    """End-to-end: a failing set comparison reports the extra item."""
    testdir.makepyfile("""
        def test_hello():
            x = set(range(100))
            y = x.copy()
            y.remove(50)
            assert x == y
    """)
    result = testdir.runpytest()
    result.stdout.fnmatch_lines([
        "*def test_hello():*",
        "*assert x == y*",
        "*E*Extra items*left*",
        "*E*50*",
    ])
def test_sequence_comparison_uses_repr(testdir):
    """Set diffs render elements with repr() (quotes appear around strings)."""
    testdir.makepyfile("""
        def test_hello():
            x = set("hello x")
            y = set("hello y")
            assert x == y
    """)
    result = testdir.runpytest()
    result.stdout.fnmatch_lines([
        "*def test_hello():*",
        "*assert x == y*",
        "*E*Extra items*left*",
        "*E*'x'*",
        "*E*Extra items*right*",
        "*E*'y'*",
    ])
def test_assert_compare_truncate_longmessage(monkeypatch, testdir):
    """Long diffs are truncated by default but shown in full with -vv or in CI."""
    testdir.makepyfile(r"""
        def test_long():
            a = list(range(200))
            b = a[::2]
            a = '\n'.join(map(str, a))
            b = '\n'.join(map(str, b))
            assert a == b
    """)
    # Make sure the CI environment variable does not disable truncation.
    monkeypatch.delenv('CI', raising=False)
    result = testdir.runpytest()
    # without -vv, truncate the message showing a few diff lines only
    result.stdout.fnmatch_lines([
        "*- 1",
        "*- 3",
        "*- 5",
        "*- 7",
        "*truncated (191 more lines)*use*-vv*",
    ])
    result = testdir.runpytest('-vv')
    result.stdout.fnmatch_lines([
        "*- 197",
    ])
    # CI=1 implies full output even without -vv.
    monkeypatch.setenv('CI', '1')
    result = testdir.runpytest()
    result.stdout.fnmatch_lines([
        "*- 197",
    ])
def test_assertrepr_loaded_per_dir(testdir):
    """Each directory's conftest supplies its own pytest_assertrepr_compare hook."""
    testdir.makepyfile(test_base=['def test_base(): assert 1 == 2'])
    a = testdir.mkdir('a')
    a_test = a.join('test_a.py')
    a_test.write('def test_a(): assert 1 == 2')
    a_conftest = a.join('conftest.py')
    a_conftest.write('def pytest_assertrepr_compare(): return ["summary a"]')
    b = testdir.mkdir('b')
    b_test = b.join('test_b.py')
    b_test.write('def test_b(): assert 1 == 2')
    b_conftest = b.join('conftest.py')
    b_conftest.write('def pytest_assertrepr_compare(): return ["summary b"]')
    result = testdir.runpytest()
    result.stdout.fnmatch_lines([
        '*def test_base():*',
        '*E*assert 1 == 2*',
        '*def test_a():*',
        '*E*assert summary a*',
        '*def test_b():*',
        '*E*assert summary b*'])
def test_assertion_options(testdir):
    """Every assertion-disabling flag combination suppresses the rich '3 == 4' diff."""
    testdir.makepyfile("""
        def test_hello():
            x = 3
            assert x == 4
    """)
    result = testdir.runpytest()
    assert "3 == 4" in result.stdout.str()
    off_options = (("--no-assert",),
                   ("--nomagic",),
                   ("--no-assert", "--nomagic"),
                   ("--assert=plain",),
                   ("--assert=plain", "--no-assert"),
                   ("--assert=plain", "--nomagic"),
                   ("--assert=plain", "--no-assert", "--nomagic"))
    for opt in off_options:
        # Subprocess run: assertion mode is decided at interpreter start.
        result = testdir.runpytest_subprocess(*opt)
        assert "3 == 4" not in result.stdout.str()
def test_old_assert_mode(testdir):
    """--assert=reinterp must not rewrite modules (no @py_builtins marker global)."""
    testdir.makepyfile("""
        def test_in_old_mode():
            assert "@py_builtins" not in globals()
    """)
    result = testdir.runpytest_subprocess("--assert=reinterp")
    assert result.ret == 0
def test_triple_quoted_string_issue113(testdir):
    """A triple-quoted string operand must not confuse the rewriter (issue 113)."""
    testdir.makepyfile("""
        def test_hello():
            assert "" == '''
    '''""")
    result = testdir.runpytest("--fulltrace")
    result.stdout.fnmatch_lines([
        "*1 failed*",
    ])
    assert 'SyntaxError' not in result.stdout.str()
def test_traceback_failure(testdir):
    """Failure tracebacks show both caller and callee frames with the diff.

    Checked twice: with explicit --tb=long, and with the default ("auto")
    traceback style, which for a single failure looks the same except for
    the frame separator line.
    """
    p1 = testdir.makepyfile("""
        def g():
            return 2
        def f(x):
            assert x == g()
        def test_onefails():
            f(3)
    """)
    result = testdir.runpytest(p1, "--tb=long")
    result.stdout.fnmatch_lines([
        "*test_traceback_failure.py F",
        "====* FAILURES *====",
        "____*____",
        "",
        " def test_onefails():",
        "> f(3)",
        "",
        "*test_*.py:6: ",
        "_ _ _ *",
        #"",
        " def f(x):",
        "> assert x == g()",
        "E assert 3 == 2",
        "E + where 2 = g()",
        "",
        "*test_traceback_failure.py:4: AssertionError"
    ])
    result = testdir.runpytest(p1) # "auto"
    result.stdout.fnmatch_lines([
        "*test_traceback_failure.py F",
        "====* FAILURES *====",
        "____*____",
        "",
        " def test_onefails():",
        "> f(3)",
        "",
        "*test_*.py:6: ",
        "",
        " def f(x):",
        "> assert x == g()",
        "E assert 3 == 2",
        "E + where 2 = g()",
        "",
        "*test_traceback_failure.py:4: AssertionError"
    ])
@pytest.mark.skipif("'__pypy__' in sys.builtin_module_names or sys.platform.startswith('java')" )
def test_warn_missing(testdir):
    """Running under python -OO warns that assert statements are stripped."""
    testdir.makepyfile("")
    result = testdir.run(sys.executable, "-OO", "-m", "pytest", "-h")
    result.stderr.fnmatch_lines([
        "*WARNING*assert statements are not executed*",
    ])
    result = testdir.run(sys.executable, "-OO", "-m", "pytest", "--no-assert")
    result.stderr.fnmatch_lines([
        "*WARNING*assert statements are not executed*",
    ])
def test_recursion_source_decode(testdir):
    """Collecting with a broad python_files glob must not crash on source decoding."""
    testdir.makepyfile("""
        def test_something():
            pass
    """)
    testdir.makeini("""
        [pytest]
        python_files = *.py
    """)
    result = testdir.runpytest("--collect-only")
    result.stdout.fnmatch_lines("""
        <Module*>
    """)
def test_AssertionError_message(testdir):
    """The second operand of ``assert cond, msg`` is shown in the failure output."""
    testdir.makepyfile("""
        def test_hello():
            x,y = 1,2
            assert 0, (x,y)
    """)
    result = testdir.runpytest()
    result.stdout.fnmatch_lines("""
        *def test_hello*
        *assert 0, (x,y)*
        *AssertionError: (1, 2)*
    """)
@pytest.mark.skipif(PY3, reason='This bug does not exist on PY3')
def test_set_with_unsortable_elements():
    """Set diffs fall back to per-item repr when elements cannot be sorted."""
    # issue #718
    class UnsortableKey(object):
        def __init__(self, name):
            self.name = name
        def __lt__(self, other):
            # Sorting these objects always fails, forcing the fallback path.
            raise RuntimeError()
        def __repr__(self):
            return 'repr({0})'.format(self.name)
        def __eq__(self, other):
            return self.name == other.name
        def __hash__(self):
            return hash(self.name)
    left_set = set(UnsortableKey(str(i)) for i in range(1, 3))
    right_set = set(UnsortableKey(str(i)) for i in range(2, 4))
    expl = callequal(left_set, right_set, verbose=True)
    # skip first line because it contains the "construction" of the set, which does not have a guaranteed order
    expl = expl[1:]
    dedent = textwrap.dedent("""
        Extra items in the left set:
        repr(1)
        Extra items in the right set:
        repr(3)
        Full diff (fallback to calling repr on each item):
        - repr(1)
        repr(2)
        + repr(3)
    """).strip()
    assert '\n'.join(expl) == dedent
| mpl-2.0 |
behnamm/cs244b_project | bindings/python/testutil.py | 20 | 6313 | # Copyright (c) 2010 Stanford University
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Utilities for unit tests."""
import retries
class BreakException(Exception):
    """Raised to break out of a unit test early."""
class Opaque(object):
    """A serializable object that equals only itself."""
    def __init__(self):
        # Store id() at construction time as the instance's stable
        # identity value (used for both hashing and comparison).
        self._id = id(self)
    def __hash__(self):
        return self._id
    def __cmp__(self, other):
        # Python 2 comparison protocol: orders/equates by the stored id.
        return cmp(self._id, other._id)
class Counter(object):
    """A strictly increasing counter.
    One way to use this class is with the C{with} statement. This way, you
    can't forget to call L{done}. See L{test_testutil.TestCounter.test_with}
    for an example.
    @ivar count: The number of times L{bump} has been called minus 1.
    @type count: C{int}
    """
    def __init__(self, tc, steps=None):
        """
        @param tc: The test case with which to make assertions.
        @type tc: C{unittest.TestCase}
        @param steps: The number of times L{bump} should be called over the
                      lifetime of the counter. This is optional.
        @type steps: C{int} or C{None}
        """
        self.tc = tc
        self.steps = steps
        # Starts at -1 so the first bump() yields 0.
        self.count = -1
    def bump(self, expected=None):
        """Increment L{count}.
        If C{steps} was passed to the constructor and L{bump} has now been
        called more than C{steps} times, this method will fail the test case.
        @param expected: The value of L{count} expected after incrementing it.
                         This is optional. If an C{int} is passed in, this
                         method will test whether the new value of L{count}
                         equals C{expected}. If a container is passed in, this
                         method will test whether the new value of L{count} is
                         C{in expected}.
        @type expected: C{int} or a container of C{int}s
        @return: The new value of L{count} as a convenience.
        @rtype: C{int}
        """
        self.count += 1
        if self.steps is not None:
            self.tc.assert_(self.count + 1 <= self.steps,
                            "count=%d, steps=%d" % (self.count, self.steps))
        if expected is not None:
            try:
                # Container form: membership test.
                self.tc.assert_(self.count in expected)
            except TypeError:
                # Scalar form: exact equality.
                self.tc.assertEquals(self.count, expected)
        return self.count
    def done(self):
        """Ensure L{bump} was called the required number of C{steps}, as given
        to L{__init__}."""
        if self.steps is not None:
            self.tc.assertEqual(self.count + 1, self.steps)
    # context manager interface:
    def __enter__(self):
        """No op.
        @return: this instance
        @rtype: L{Counter}
        """
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        """Wrapper for L{done}.
        Prefers existing exceptions over those caused by L{done}.
        """
        try:
            self.done()
        except:
            if exc_type is None:
                raise
            else:
                # If there was already an exception, I'm betting it's more
                # interesting.
                print "Suppressed exception from Counter.__exit__()"
class MockRetry(retries.ImmediateRetry):
    """A mock implementation of a L{retries.ImmediateRetry}.
    This retry implementation only runs for a single iteration, which is usually
    enough.
    """
    def __init__(self, tc, expect_immediate=False, expect_later=False):
        """
        @param tc: The test case with which to make assertions.
        @type tc: C{unittest.TestCase}
        @param expect_immediate: Whether to expect a call to L{immediate}.
        @type expect_immediate: C{bool}
        @param expect_later: Whether to expect a call to L{later}.
        @type expect_later: C{bool}
        """
        retries.ImmediateRetry.__init__(self)
        self.tc = tc
        self.expect_immediate = expect_immediate
        self.expect_later = expect_later
    def __call__(self):
        """Reinitialized this instance.
        This way L{MockRetry} behaves somewhat like a class.
        @return: this instance
        @rtype: L{MockRetry}
        """
        retries.ImmediateRetry.__init__(self)
        return self
    def next(self):
        # Allow exactly one iteration of the retry loop, then abort the
        # test via BreakException.
        r = retries.ImmediateRetry.next(self)
        if self.count == 1:
            raise BreakException
        return r
    def immediate(self):
        # Each expected call may happen at most once; clear the flag.
        self.tc.assert_(self.expect_immediate)
        self.expect_immediate = False
        retries.ImmediateRetry.immediate(self)
    def later(self):
        # Each expected call may happen at most once; clear the flag.
        self.tc.assert_(self.expect_later)
        self.expect_later = False
        retries.ImmediateRetry.later(self)
    def done(self):
        """Ensures C{expect_immediate} and C{expect_later} have been
        satisfied."""
        self.tc.assertFalse(self.expect_immediate)
        self.tc.assertFalse(self.expect_later)
    def __enter__(self):
        """No op.
        @return: this instance
        @rtype: L{MockRetry}
        """
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        """Wrapper for L{done}.
        Prefers existing exceptions over those caused by L{done}.
        """
        try:
            self.done()
        except:
            if exc_type is None:
                raise
            else:
                # If there was already an exception, I'm betting it's more
                # interesting.
                print "Suppressed exception from MockRetry.__exit__()"
| isc |
xutian/avocado-vt | virttest/libvirt_version.py | 4 | 1889 | """
Shared code for tests that need to get the libvirt version
"""
import re
import logging
from avocado.utils import process
from virttest.compat_52lts import decode_to_text
def version_compare(major, minor, update, session=None):
    """
    Determine/use the current libvirt library version on the system
    and compare input major, minor, and update values against it.

    If the running version is greater than or equal to the input
    params version, then return True; otherwise, return False.

    This is designed to handle upstream version comparisons for
    test adjustments and/or comparisons as a result of upstream
    fixes or changes that could impact test results.

    :param major: Major version to compare against
    :param minor: Minor version to compare against
    :param update: Update value to compare against
    :param session: Shell session on remote host
    :return: True if running version is greater than or
             equal to the input libvirt version
    """
    # Run "virsh version" either locally or through the remote session.
    run_cmd = session.cmd_output if session else process.system_output
    installed = 0
    try:
        pattern = (r'[Uu]sing\s*[Ll]ibrary:\s*[Ll]ibvirt\s*'
                   r'(\d+)\.(\d+)\.(\d+)')
        for line in decode_to_text(run_cmd("virsh version")).splitlines():
            match = re.search(pattern, line)
            if match is not None:
                # Pack major/minor/update into a single comparable integer.
                installed = (int(match.group(1)) * 1000000 +
                             int(match.group(2)) * 1000 +
                             int(match.group(3)))
                break
    except (ValueError, TypeError, AttributeError):
        logging.warning("Error determining libvirt version")
        return False
    return installed >= (major * 1000000 + minor * 1000 + update)
| gpl-2.0 |
sony/nnabla | python/src/nnabla/utils/converter/nnabla/importer.py | 1 | 6376 | # Copyright 2018,2019,2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import google.protobuf.text_format as text_format
import os
import shutil
import tempfile
import zipfile
from . import expander
from nnabla.utils import nnabla_pb2
class NnpImporter:
    """Importer that merges NNabla files (*.nnp, *.nntxt, *.prototxt,
    *.protobuf, *.h5) into a single ``NNablaProtoBuf`` ready for
    conversion, optionally shrinking to one executor and expanding
    repeat/recurrent network constructs."""

    def _shrink_with_executor(self, executor):
        """Return a copy of ``self._nnp`` reduced to *executor* and its
        network, or None when the referenced network cannot be found."""
        print(' Try to leave only executor[{}].'.format(executor.name))
        network = None
        for n in self._nnp.network:
            if n.name == executor.network_name:
                network = n
        if network is None:
            return None
        # Copy everything, then drop training-only parts and keep exactly
        # one network and one executor.
        nnp = nnabla_pb2.NNablaProtoBuf()
        nnp.CopyFrom(self._nnp)
        nnp.ClearField('optimizer')
        nnp.ClearField('monitor')
        nnp.ClearField('network')
        net = nnp.network.add()
        net.CopyFrom(network)
        nnp.ClearField('executor')
        exe = nnp.executor.add()
        exe.CopyFrom(executor)
        return nnp

    def __init__(self, *args, **kwargs):
        """
        :param args: input file names; unknown extensions are passed
            through untouched as "other files".
        :param expand_network: if True, expand repeat/recurrent constructs.
        :param executor_index: if given, keep only the executor at this
            index (when it is in range).
        """
        self._args = args
        # kwargs.get keeps the original defaults (False / None).
        self._expand_network = kwargs.get('expand_network', False)
        self._executor_index = kwargs.get('executor_index')

    def load_parameters(self, filename):
        """Merge parameters from an .h5 or .protobuf file into self._nnp."""
        e = os.path.splitext(filename)[1].lower()
        if e == '.h5':
            import h5py
            with h5py.File(filename, 'r') as hd:
                keys = []

                def _get_keys(name):
                    ds = hd[name]
                    if not isinstance(ds, h5py.Dataset):
                        # Group
                        return
                    # To preserve order of parameters
                    keys.append((ds.attrs.get('index', None), name))
                hd.visit(_get_keys)
                for _, key in sorted(keys):
                    ds = hd[key]
                    parameter = self._nnp.parameter.add()
                    parameter.variable_name = key
                    parameter.shape.dim.extend(ds.shape)
                    parameter.data.extend(ds[...].flatten())
                    # attrs['need_grad'] may be a numpy bool; bool()
                    # normalizes it (replaces the old if/else True/False).
                    parameter.need_grad = bool(ds.attrs['need_grad'])
        elif e == '.protobuf':
            with open(filename, 'rb') as f:
                self._nnp.MergeFromString(f.read())

    def find_network(self, executor_name):
        """Return the last network whose name matches, else None."""
        net = None
        for network in self._nnp.network:
            if network.name == executor_name:
                net = network
        return net

    def find_parameter_variable(self, network):
        """Return the Parameter-type variables of *network* that carry an
        initializer."""
        return [var for var in network.variable
                if var.type == "Parameter" and var.initializer]

    def generate_parameters_data(self, var_list, batch_size):
        """Materialize parameter data for variables that only declare an
        initializer, using a fixed RNG seed for reproducibility."""
        from nnabla.core.graph_def import _create_initializer
        from nnabla.parameter import get_parameter_or_create
        import numpy as np

        rng = np.random.RandomState(0)
        for var in var_list:
            # A dim < 1 is a placeholder for the batch dimension.
            shape = tuple(
                [d if d >= 1 else batch_size for d in var.shape.dim])
            initializer = _create_initializer(var, rng)
            variable_instance = get_parameter_or_create(
                var.name, shape, initializer)
            p = self._nnp.parameter.add()
            p.variable_name = var.name
            p.shape.dim.extend(shape)
            p.data.extend(variable_instance.d.flatten())

    def execute(self):
        """Read every input file, then post-process (parameter generation,
        executor shrinking, network expansion).

        :return: an object with ``protobuf`` and ``other_files`` attributes
        """
        self._nnp = nnabla_pb2.NNablaProtoBuf()
        other_files = []
        for ifile in self._args:
            print('Importing {}'.format(ifile))
            ext = os.path.splitext(ifile)[1].lower()
            if ext == '.nnp':
                # Create the scratch directory *before* the try block: in
                # the original code mkdtemp() was inside the try, so a
                # failure there made the finally clause raise NameError on
                # 'tmpdir' instead of surfacing the real error.
                tmpdir = tempfile.mkdtemp()
                try:
                    with zipfile.ZipFile(ifile, 'r') as nnpzip:
                        # First pass: network/executor text definitions.
                        for name in nnpzip.namelist():
                            if os.path.splitext(name)[1].lower() in ['.nntxt', '.prototxt']:
                                nnpzip.extract(name, tmpdir)
                                with open(os.path.join(tmpdir, name), 'rt') as f:
                                    text_format.Merge(f.read(), self._nnp)
                        for name in nnpzip.namelist():  # Param
                            if os.path.splitext(name)[1].lower() in ['.protobuf', '.h5']:
                                nnpzip.extract(name, tmpdir)
                                self.load_parameters(
                                    os.path.join(tmpdir, name))
                finally:
                    shutil.rmtree(tmpdir)
            elif ext in ['.nntxt', '.prototxt']:
                with open(ifile, 'rt') as f:
                    text_format.Merge(f.read(), self._nnp)
            elif ext in ['.protobuf', '.h5']:
                self.load_parameters(ifile)
            else:
                other_files.append(ifile)

        # Generate parameter data when the network only declares
        # initializers and no parameters were loaded from files.
        executor_name = self._nnp.executor[0].network_name
        network = self.find_network(executor_name)
        parameter_variable_list = self.find_parameter_variable(network)
        if parameter_variable_list and not self._nnp.parameter:
            self.generate_parameters_data(
                parameter_variable_list, network.batch_size)

        if self._executor_index is not None:
            if self._executor_index < len(self._nnp.executor):
                self._nnp = self._shrink_with_executor(
                    self._nnp.executor[self._executor_index])

        if self._expand_network:
            self._nnp = expander.NnpExpander(self._nnp).execute()

        class nnp:
            # Lightweight namespace object carrying the result.
            pass
        nnp.protobuf = self._nnp
        nnp.other_files = other_files
        return nnp
| apache-2.0 |
betrisey/home-assistant | homeassistant/components/sensor/yahoo_finance.py | 1 | 3591 | """
Currency exchange rate support that comes from Yahoo Finance.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.yahoo_finance/
"""
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (CONF_NAME, ATTR_ATTRIBUTION)
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['yahoo-finance==1.3.2']
_LOGGER = logging.getLogger(__name__)
ATTR_CHANGE = 'Change'
ATTR_OPEN = 'open'
ATTR_PREV_CLOSE = 'prev_close'
CONF_ATTRIBUTION = "Stock market information provided by Yahoo! Inc."
CONF_SYMBOL = 'symbol'
DEFAULT_NAME = 'Yahoo Stock'
DEFAULT_SYMBOL = 'YHOO'
ICON = 'mdi:currency-usd'
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=1)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_SYMBOL, default=DEFAULT_SYMBOL): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the Yahoo Finance sensor from its platform configuration."""
    sensor_name = config.get(CONF_NAME)
    stock_symbol = config.get(CONF_SYMBOL)
    stock_data = YahooFinanceData(sensor_name, stock_symbol)
    add_devices([YahooFinanceSensor(sensor_name, stock_data, stock_symbol)])
# pylint: disable=too-few-public-methods
class YahooFinanceSensor(Entity):
    """Representation of a Yahoo Finance sensor."""

    def __init__(self, name, data, symbol):
        """Initialize the sensor and fetch its first quote."""
        self._name = name
        self.data = data
        self._symbol = symbol
        self._state = None
        self._unit_of_measurement = None
        self.update()

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity, if any."""
        return self._symbol

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def device_state_attributes(self):
        """Return the state attributes (None until a quote is available)."""
        if self._state is None:
            return None
        return {
            ATTR_ATTRIBUTION: CONF_ATTRIBUTION,
            ATTR_CHANGE: self.data.price_change,
            ATTR_OPEN: self.data.price_open,
            ATTR_PREV_CLOSE: self.data.prev_close,
        }

    @property
    def icon(self):
        """Return the icon to use in the frontend, if any."""
        return ICON

    def update(self):
        """Get the latest data and update the state."""
        _LOGGER.debug("Updating sensor %s - %s", self._name, self._state)
        self.data.update()
        self._state = self.data.state
class YahooFinanceData(object):
    """Fetch quote data from Yahoo Finance for a single symbol."""

    def __init__(self, name, symbol):
        """Initialize the data object and its underlying Share handle."""
        from yahoo_finance import Share

        self._name = name
        self._symbol = symbol
        self.state = None
        self.price_change = None
        self.price_open = None
        self.prev_close = None
        self.stock = Share(symbol)

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Refresh the share and cache the latest quote values."""
        stock = self.stock
        stock.refresh()
        self.state = stock.get_price()
        self.price_change = stock.get_change()
        self.price_open = stock.get_open()
        self.prev_close = stock.get_prev_close()
| mit |
jdf/processing.py | testing/resources/test_loadthings.py | 2 | 1157 | helloworld = loadStrings("strings.txt")
# Smoke tests for Processing.py's load*/create* helpers; relies on the
# sketch data folder containing strings.txt, object.json and array.json.
# (The initial loadStrings("strings.txt") call occurs on the preceding line.)
assert helloworld[0] == 'hello'
assert helloworld[1] == 'world'

# loadStrings() also accepts the reader object returned by createReader().
helloworld = loadStrings(createReader("strings.txt"))
assert helloworld[0] == 'hello'
assert helloworld[1] == 'world'

# loadBytes() yields the raw file bytes; compare byte-for-byte.
expected = 'hello\nworld\n'
for i, c in enumerate(loadBytes("strings.txt")):
    assert c == ord(expected[i])

# JSON object loading: by data-folder name and by java.io.File.
o = loadJSONObject("object.json")
assert o.getString('phrase') == 'hello world'
assert o.getInt('amount') == 42
from java.io import File
o = loadJSONObject(File("testing/resources/data/object.json"))
assert o.getString('phrase') == 'hello world'
assert o.getInt('amount') == 42

# JSON array loading, same two flavours.
a = loadJSONArray("array.json")
assert a.getString(0) == 'hello'
assert a.getString(1) == 'world'
a = loadJSONArray(File("testing/resources/data/array.json"))
assert a.getString(0) == 'hello'
assert a.getString(1) == 'world'

# createInput() yields a stream that loadStrings() accepts as well.
expected = ['hello', 'world']
helloworld = loadStrings(createInput("strings.txt"))
assert helloworld[0] == 'hello'
assert helloworld[1] == 'world'
helloworld = loadStrings(createInput(File("testing/resources/data/strings.txt")))
assert helloworld[0] == 'hello'
assert helloworld[1] == 'world'

print 'OK'
exit()
tillahoffmann/tensorflow | tensorflow/python/training/saver_large_partitioned_variable_test.py | 141 | 2261 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for tensorflow.python.training.saver.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import saver
class SaverLargePartitionedVariableTest(test.TestCase):

  # Need to do this in a separate test because of the amount of memory needed
  # to run this test.
  def testLargePartitionedVariables(self):
    """Save a bool partitioned variable whose total element count (1 << 31)
    exceeds the int32 range, split into 4 smaller shards."""
    save_path = os.path.join(self.get_temp_dir(), "large_variable")
    var_name = "my_var"
    # Saving large partition variable.
    with session.Session("", graph=ops.Graph()) as sess:
      with ops.device("/cpu:0"):
        # Create a partitioned variable which is larger than int32 size but
        # split into smaller sized variables.
        init = lambda shape, dtype, partition_info: constant_op.constant(
            True, dtype, shape)
        partitioned_var = partitioned_variables.create_partitioned_variables(
            [1 << 31], [4], init, dtype=dtypes.bool, name=var_name)
        variables.global_variables_initializer().run()
        save = saver.Saver(partitioned_var)
        val = save.save(sess, save_path)
        # Saver.save() returns the checkpoint path prefix it wrote.
        self.assertEqual(save_path, val)


if __name__ == "__main__":
  test.main()
| apache-2.0 |
martinburchell/econsensus | django/econsensus/publicweb/config.py | 4 | 1857 | #Configuration for django-livesettings.
#http://django-livesettings.readthedocs.org
#pylint: disable=E1102
# '_ is not callable'
from livesettings import config_register, ConfigurationGroup, StringValue, \
BooleanValue, PasswordValue, IntegerValue
from django.utils.translation import ugettext_lazy as _
# Livesettings values used by the incoming-mail (POP3) poller: credentials,
# server address/port and an SSL flag.
RECEIVEMAIL_GROUP = ConfigurationGroup(
    'ReceiveMail',  # key: internal name of the group to be created
    _('Receiving email Settings (POP3)'),  # name: verbose name which can be automatically translated
    ordering=0  # ordering: order of group in the list (default is 1)
)

config_register(StringValue(
    RECEIVEMAIL_GROUP,  # group: object of ConfigurationGroup created above
    'USERNAME',  # key: internal name of the configuration value to be created
    description = _('Username'),  # label for the value
    help_text = _("Enter the Username used to access the email account."),  # help text
    ordering = 0
))

config_register(PasswordValue(
    RECEIVEMAIL_GROUP,
    'PASSWORD',
    # Wrapped in _() for consistency: every other label/help text in this
    # group is marked for translation; these two were plain strings.
    description=_('Password'),
    help_text=_('Enter the password to access this mail account.'),
    render_value=True,  # show the stored value back in the admin form
    ordering = 1
))

config_register(StringValue(
    RECEIVEMAIL_GROUP,
    'SERVER',
    description=_("Server"),
    help_text=_("Enter the url of the mail server."),
    ordering = 2
))

config_register(IntegerValue(
    RECEIVEMAIL_GROUP,
    'PORT',
    description=_("Port"),
    help_text=_("Enter the port number of the mail server."),
    ordering = 3
))

config_register(BooleanValue(
    RECEIVEMAIL_GROUP,
    'SSL_ENABLED',
    description=_("SSL Enabled"),
    help_text=_("Check to enable SSL transfer"),
    default=False,
    ordering = 4
))
| gpl-3.0 |
laysakura/chainer | tests/chainer_tests/functions_tests/activation_tests/test_lstm.py | 16 | 4559 | import unittest
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
def _sigmoid(x):
return 1 / (1 + numpy.exp(-x))
class TestLSTM(unittest.TestCase):
    # Shapes: batch=3, 2 hidden units, trailing spatial axis of 4.  x packs
    # the four LSTM gates per hidden unit (4 * 2 = 8 channels); the
    # indexing in check_forward shows the layout is [a, i, f, o] for unit 0
    # (channels 0-3) then unit 1 (channels 4-7).

    def setUp(self):
        self.c_prev = numpy.random.uniform(-1,
                                           1, (3, 2, 4)).astype(numpy.float32)
        self.x = numpy.random.uniform(-1, 1, (3, 8, 4)).astype(numpy.float32)
        self.gc = numpy.random.uniform(-1, 1, (3, 2, 4)).astype(numpy.float32)
        self.gh = numpy.random.uniform(-1, 1, (3, 2, 4)).astype(numpy.float32)

    def flat(self):
        # Drop the trailing spatial axis to also exercise 2-D inputs.
        self.c_prev = self.c_prev[:, :, 0].copy()
        self.x = self.x[:, :, 0].copy()
        self.gc = self.gc[:, :, 0].copy()
        self.gh = self.gh[:, :, 0].copy()

    def check_forward(self, c_prev_data, x_data):
        """Run functions.lstm and compare against a NumPy reference."""
        c_prev = chainer.Variable(c_prev_data)
        x = chainer.Variable(x_data)
        c, h = functions.lstm(c_prev, x)
        self.assertEqual(c.data.dtype, numpy.float32)
        self.assertEqual(h.data.dtype, numpy.float32)

        # Compute expected out
        # Gate slices follow the [a, i, f, o]-per-unit channel layout.
        a_in = self.x[:, [0, 4]]
        i_in = self.x[:, [1, 5]]
        f_in = self.x[:, [2, 6]]
        o_in = self.x[:, [3, 7]]
        # Standard LSTM cell equations.
        c_expect = _sigmoid(i_in) * numpy.tanh(a_in) + \
            _sigmoid(f_in) * self.c_prev
        h_expect = _sigmoid(o_in) * numpy.tanh(c_expect)

        gradient_check.assert_allclose(c_expect, c.data)
        gradient_check.assert_allclose(h_expect, h.data)

    @condition.retry(3)
    def test_forward_cpu(self):
        self.check_forward(self.c_prev, self.x)

    @condition.retry(3)
    def test_flat_forward_cpu(self):
        self.flat()
        self.test_forward_cpu()

    @attr.gpu
    @condition.retry(3)
    def test_forward_gpu(self):
        self.check_forward(cuda.to_gpu(self.c_prev), cuda.to_gpu(self.x))

    @attr.gpu
    @condition.retry(3)
    def test_flat_forward_gpu(self):
        self.flat()
        self.test_forward_gpu()

    def check_backward(self, c_prev_data, x_data, c_grad, h_grad):
        """Compare analytic gradients against numerical differentiation.

        c_grad/h_grad may be None to exercise partially connected
        backward passes.
        """
        c_prev = chainer.Variable(c_prev_data)
        x = chainer.Variable(x_data)
        c, h = functions.lstm(c_prev, x)
        c.grad = c_grad
        h.grad = h_grad
        c.backward()

        func = c.creator
        f = lambda: func.forward((c_prev.data, x.data))
        gc_prev, gx = gradient_check.numerical_grad(
            f, (c_prev.data, x.data), (c_grad, h_grad), eps=1e-2)

        gradient_check.assert_allclose(gc_prev, c_prev.grad)
        gradient_check.assert_allclose(gx, x.grad)

    @condition.retry(3)
    def test_full_backward_cpu(self):
        self.check_backward(self.c_prev, self.x, self.gc, self.gh)

    @condition.retry(3)
    def test_flat_full_backward_cpu(self):
        self.flat()
        self.test_full_backward_cpu()

    @condition.retry(3)
    def test_no_gc_backward_cpu(self):
        self.check_backward(self.c_prev, self.x, None, self.gh)

    @condition.retry(3)
    def test_flat_no_gc_backward_cpu(self):
        self.flat()
        self.test_no_gc_backward_cpu()

    @condition.retry(3)
    def test_no_gh_backward_cpu(self):
        self.check_backward(self.c_prev, self.x, self.gc, None)

    @condition.retry(3)
    def test_flat_no_gh_backward_cpu(self):
        self.flat()
        self.test_no_gh_backward_cpu()

    @attr.gpu
    @condition.retry(3)
    def test_full_backward_gpu(self):
        self.check_backward(
            cuda.to_gpu(self.c_prev), cuda.to_gpu(self.x),
            cuda.to_gpu(self.gc), cuda.to_gpu(self.gh))

    @attr.gpu
    @condition.retry(3)
    def test_flat_full_backward_gpu(self):
        self.flat()
        self.test_full_backward_gpu()

    @attr.gpu
    @condition.retry(3)
    def test_no_gc_backward_gpu(self):
        self.check_backward(
            cuda.to_gpu(self.c_prev), cuda.to_gpu(self.x),
            None, cuda.to_gpu(self.gh))

    @attr.gpu
    @condition.retry(3)
    def test_flat_no_gc_backward_gpu(self):
        self.flat()
        self.test_no_gc_backward_gpu()

    @attr.gpu
    @condition.retry(3)
    def test_no_gh_backward_gpu(self):
        self.check_backward(
            cuda.to_gpu(self.c_prev), cuda.to_gpu(self.x),
            cuda.to_gpu(self.gc), None)

    @attr.gpu
    @condition.retry(3)
    def test_flat_no_gh_backward_gpu(self):
        self.flat()
        self.test_no_gh_backward_gpu()
testing.run_module(__name__, __file__)
| mit |
coursemdetw/2014c2 | wsgi/static/reeborg/src/libraries/brython/Lib/subprocess.py | 11 | 16440 | """subprocess - Subprocesses with accessible I/O streams
This module allows you to spawn processes, connect to their
input/output/error pipes, and obtain their return codes. This module
intends to replace several other, older modules and functions, like:
os.system
os.spawn*
os.popen*
popen2.*
commands.*
Information about how the subprocess module can be used to replace these
modules and functions can be found below.
Using the subprocess module
===========================
This module defines one class called Popen:
class Popen(args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0):
Arguments are:
args should be a string, or a sequence of program arguments. The
program to execute is normally the first item in the args sequence or
string, but can be explicitly set by using the executable argument.
On UNIX, with shell=False (default): In this case, the Popen class
uses os.execvp() to execute the child program. args should normally
be a sequence. A string will be treated as a sequence with the string
as the only item (the program to execute).
On UNIX, with shell=True: If args is a string, it specifies the
command string to execute through the shell. If args is a sequence,
the first item specifies the command string, and any additional items
will be treated as additional shell arguments.
On Windows: the Popen class uses CreateProcess() to execute the child
program, which operates on strings. If args is a sequence, it will be
converted to a string using the list2cmdline method. Please note that
not all MS Windows applications interpret the command line the same
way: The list2cmdline is designed for applications using the same
rules as the MS C runtime.
bufsize, if given, has the same meaning as the corresponding argument
to the built-in open() function: 0 means unbuffered, 1 means line
buffered, any other positive value means use a buffer of
(approximately) that size. A negative bufsize means to use the system
default, which usually means fully buffered. The default value for
bufsize is 0 (unbuffered).
stdin, stdout and stderr specify the executed programs' standard
input, standard output and standard error file handles, respectively.
Valid values are PIPE, an existing file descriptor (a positive
integer), an existing file object, and None. PIPE indicates that a
new pipe to the child should be created. With None, no redirection
will occur; the child's file handles will be inherited from the
parent. Additionally, stderr can be STDOUT, which indicates that the
stderr data from the applications should be captured into the same
file handle as for stdout.
If preexec_fn is set to a callable object, this object will be called
in the child process just before the child is executed.
If close_fds is true, all file descriptors except 0, 1 and 2 will be
closed before the child process is executed.
if shell is true, the specified command will be executed through the
shell.
If cwd is not None, the current directory will be changed to cwd
before the child is executed.
If env is not None, it defines the environment variables for the new
process.
If universal_newlines is true, the file objects stdout and stderr are
opened as a text files, but lines may be terminated by any of '\n',
the Unix end-of-line convention, '\r', the Macintosh convention or
'\r\n', the Windows convention. All of these external representations
are seen as '\n' by the Python program. Note: This feature is only
available if Python is built with universal newline support (the
default). Also, the newlines attribute of the file objects stdout,
stdin and stderr are not updated by the communicate() method.
The startupinfo and creationflags, if given, will be passed to the
underlying CreateProcess() function. They can specify things such as
appearance of the main window and priority for the new process.
(Windows only)
This module also defines some shortcut functions:
call(*popenargs, **kwargs):
Run command with arguments. Wait for command to complete, then
return the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
retcode = call(["ls", "-l"])
check_call(*popenargs, **kwargs):
Run command with arguments. Wait for command to complete. If the
exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
check_call(["ls", "-l"])
check_output(*popenargs, **kwargs):
Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
output = check_output(["ls", "-l", "/dev/null"])
Exceptions
----------
Exceptions raised in the child process, before the new program has
started to execute, will be re-raised in the parent. Additionally,
the exception object will have one extra attribute called
'child_traceback', which is a string containing traceback information
from the child's point of view.
The most common exception raised is OSError. This occurs, for
example, when trying to execute a non-existent file. Applications
should prepare for OSErrors.
A ValueError will be raised if Popen is called with invalid arguments.
check_call() and check_output() will raise CalledProcessError, if the
called process returns a non-zero return code.
Security
--------
Unlike some other popen functions, this implementation will never call
/bin/sh implicitly. This means that all characters, including shell
metacharacters, can safely be passed to child processes.
Popen objects
=============
Instances of the Popen class have the following methods:
poll()
Check if child process has terminated. Returns returncode
attribute.
wait()
Wait for child process to terminate. Returns returncode attribute.
communicate(input=None)
Interact with process: Send data to stdin. Read data from stdout
and stderr, until end-of-file is reached. Wait for process to
terminate. The optional input argument should be a string to be
sent to the child process, or None, if no data should be sent to
the child.
communicate() returns a tuple (stdout, stderr).
Note: The data read is buffered in memory, so do not use this
method if the data size is large or unlimited.
The following attributes are also available:
stdin
If the stdin argument is PIPE, this attribute is a file object
that provides input to the child process. Otherwise, it is None.
stdout
If the stdout argument is PIPE, this attribute is a file object
that provides output from the child process. Otherwise, it is
None.
stderr
If the stderr argument is PIPE, this attribute is file object that
provides error output from the child process. Otherwise, it is
None.
pid
The process ID of the child process.
returncode
The child return code. A None value indicates that the process
hasn't terminated yet. A negative value -N indicates that the
child was terminated by signal N (UNIX only).
Replacing older functions with the subprocess module
====================================================
In this section, "a ==> b" means that b can be used as a replacement
for a.
Note: All functions in this section fail (more or less) silently if
the executed program cannot be found; this module raises an OSError
exception.
In the following examples, we assume that the subprocess module is
imported with "from subprocess import *".
Replacing /bin/sh shell backquote
---------------------------------
output=`mycmd myarg`
==>
output = Popen(["mycmd", "myarg"], stdout=PIPE).communicate()[0]
Replacing shell pipe line
-------------------------
output=`dmesg | grep hda`
==>
p1 = Popen(["dmesg"], stdout=PIPE)
p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
output = p2.communicate()[0]
Replacing os.system()
---------------------
sts = os.system("mycmd" + " myarg")
==>
p = Popen("mycmd" + " myarg", shell=True)
pid, sts = os.waitpid(p.pid, 0)
Note:
* Calling the program through the shell is usually not required.
* It's easier to look at the returncode attribute than the
exitstatus.
A more real-world example would look like this:
try:
retcode = call("mycmd" + " myarg", shell=True)
if retcode < 0:
print >>sys.stderr, "Child was terminated by signal", -retcode
else:
print >>sys.stderr, "Child returned", retcode
except OSError, e:
print >>sys.stderr, "Execution failed:", e
Replacing os.spawn*
-------------------
P_NOWAIT example:
pid = os.spawnlp(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg")
==>
pid = Popen(["/bin/mycmd", "myarg"]).pid
P_WAIT example:
retcode = os.spawnlp(os.P_WAIT, "/bin/mycmd", "mycmd", "myarg")
==>
retcode = call(["/bin/mycmd", "myarg"])
Vector example:
os.spawnvp(os.P_NOWAIT, path, args)
==>
Popen([path] + args[1:])
Environment example:
os.spawnlpe(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg", env)
==>
Popen(["/bin/mycmd", "myarg"], env={"PATH": "/usr/bin"})
Replacing os.popen*
-------------------
pipe = os.popen("cmd", mode='r', bufsize)
==>
pipe = Popen("cmd", shell=True, bufsize=bufsize, stdout=PIPE).stdout
pipe = os.popen("cmd", mode='w', bufsize)
==>
pipe = Popen("cmd", shell=True, bufsize=bufsize, stdin=PIPE).stdin
(child_stdin, child_stdout) = os.popen2("cmd", mode, bufsize)
==>
p = Popen("cmd", shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdin, child_stdout) = (p.stdin, p.stdout)
(child_stdin,
child_stdout,
child_stderr) = os.popen3("cmd", mode, bufsize)
==>
p = Popen("cmd", shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
(child_stdin,
child_stdout,
child_stderr) = (p.stdin, p.stdout, p.stderr)
(child_stdin, child_stdout_and_stderr) = os.popen4("cmd", mode,
bufsize)
==>
p = Popen("cmd", shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
(child_stdin, child_stdout_and_stderr) = (p.stdin, p.stdout)
On Unix, os.popen2, os.popen3 and os.popen4 also accept a sequence as
the command to execute, in which case arguments will be passed
directly to the program without shell intervention. This usage can be
replaced as follows:
(child_stdin, child_stdout) = os.popen2(["/bin/ls", "-l"], mode,
bufsize)
==>
p = Popen(["/bin/ls", "-l"], bufsize=bufsize, stdin=PIPE, stdout=PIPE)
(child_stdin, child_stdout) = (p.stdin, p.stdout)
Return code handling translates as follows:
pipe = os.popen("cmd", 'w')
...
rc = pipe.close()
if rc is not None and rc % 256:
print "There were some errors"
==>
process = Popen("cmd", 'w', shell=True, stdin=PIPE)
...
process.stdin.close()
if process.wait() != 0:
print "There were some errors"
Replacing popen2.*
------------------
(child_stdout, child_stdin) = popen2.popen2("somestring", bufsize, mode)
==>
p = Popen(["somestring"], shell=True, bufsize=bufsize
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdout, child_stdin) = (p.stdout, p.stdin)
On Unix, popen2 also accepts a sequence as the command to execute, in
which case arguments will be passed directly to the program without
shell intervention. This usage can be replaced as follows:
(child_stdout, child_stdin) = popen2.popen2(["mycmd", "myarg"], bufsize,
mode)
==>
p = Popen(["mycmd", "myarg"], bufsize=bufsize,
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdout, child_stdin) = (p.stdout, p.stdin)
The popen2.Popen3 and popen2.Popen4 basically works as subprocess.Popen,
except that:
* subprocess.Popen raises an exception if the execution fails
* the capturestderr argument is replaced with the stderr argument.
* stdin=PIPE and stdout=PIPE must be specified.
* popen2 closes all filedescriptors by default, but you have to specify
close_fds=True with subprocess.Popen.
"""
# NOTE(review): this module is Brython's browser-side stand-in for CPython's
# subprocess; the classes and functions below are non-functional stubs.
CREATE_NEW_CONSOLE = 16
CREATE_NEW_PROCESS_GROUP = 512

class CalledProcessError(Exception):
    # Stub: the real exception carries returncode/cmd/output attributes.
    pass

MAXFD = 256
PIPE = -1

class Popen(object):
    # Stub: spawning processes is not possible in the browser.
    pass

STARTF_USESHOWWINDOW = 1
STARTF_USESTDHANDLES = 256

class STARTUPINFO:
    pass

STDOUT = -2
STD_ERROR_HANDLE = -12
STD_INPUT_HANDLE = -10
STD_OUTPUT_HANDLE = -11
SW_HIDE = 0

# NOTE(review): __all__ and _active are *string literals* here, not lists --
# presumably emitted by the stub generator; values left untouched.
__all__ = "['Popen', 'PIPE', 'STDOUT', 'call', 'check_call', 'check_output', 'CalledProcessError', 'CREATE_NEW_CONSOLE', 'CREATE_NEW_PROCESS_GROUP', 'STD_INPUT_HANDLE', 'STD_OUTPUT_HANDLE', 'STD_ERROR_HANDLE', 'SW_HIDE', 'STARTF_USESTDHANDLES', 'STARTF_USESHOWWINDOW']"
_active = "[]"
def _cleanup(*args,**kw):
    # Stub: no child processes to reap in the browser.
    pass

def _demo_posix(*args,**kw):
    # Stub.
    pass

def _demo_windows(*args,**kw):
    # Stub.
    pass

def _eintr_retry_call(*args,**kw):
    # Stub: no system calls to retry on EINTR in the browser.
    pass

# NOTE(review): module reference stored as its repr string by the stub
# generator, not as an actual module object.
_subprocess = "<module '_subprocess' (built-in)>"
def call(*args,**kw):
    """Run command with arguments. Wait for command to complete, then return the returncode attribute.

    The arguments are the same as for the Popen constructor. Example:

    retcode = call(["ls", "-l"])
    """
    # Stub: always returns None in the browser environment.
    pass
def check_call(*args,**kw):
"""Run command with arguments. Wait for command to complete. If the exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
check_call(["ls", "-l"])
"""
pass
def check_output(*args,**kw):
"""Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=STDOUT)
'ls: non_existent_file: No such file or directory\n'
"""
pass
# String-valued stubs holding the repr of the real imported modules.
errno = "<module 'errno' (built-in)>"
gc = "<module 'gc' (built-in)>"
# Stub: the real implementation builds a Windows command line; docstring kept.
def list2cmdline(*args,**kw):
    """ Translate a sequence of arguments into a command line
    string, using the same rules as the MS C runtime:
    1) Arguments are delimited by white space, which is either a
    space or a tab.
    2) A string surrounded by double quotation marks is
    interpreted as a single argument, regardless of white space
    contained within.  A quoted string can be embedded in an
    argument.
    3) A double quotation mark preceded by a backslash is
    interpreted as a literal double quotation mark.
    4) Backslashes are interpreted literally, unless they
    immediately precede a double quotation mark.
    5) If backslashes immediately precede a double quotation mark,
    every pair of backslashes is interpreted as a literal
    backslash.  If the number of backslashes is odd, the last
    backslash escapes the next double quotation mark as
    described in rule 3.
    """
    pass
# More string-valued module stubs; paths indicate the stub was generated
# against a Windows Python 2.7 install.
msvcrt = "<module 'msvcrt' (built-in)>"
# Platform flag mirrored from the real module (True on Windows).
mswindows = True
os = "<module 'os' from 'c:\python27\lib\os.pyc'>"
# Placeholder for the pywin32 'pywintypes' module used on Windows.
class pywintypes:
    pass
signal = "<module 'signal' (built-in)>"
sys = "<module 'sys' (built-in)>"
threading = "<module 'threading' from 'c:\python27\lib\threading.pyc'>"
traceback = "<module 'traceback' from 'c:\python27\lib\traceback.pyc'>"
types = "<module 'types' from 'c:\python27\lib\types.pyc'>"
| gpl-2.0 |
craynot/django | tests/gis_tests/geoapp/test_feeds.py | 292 | 4194 | from __future__ import unicode_literals
from xml.dom import minidom
from django.conf import settings
from django.contrib.sites.models import Site
from django.test import (
TestCase, modify_settings, override_settings, skipUnlessDBFeature,
)
from .models import City
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.sites'})
@override_settings(ROOT_URLCONF='gis_tests.geoapp.urls')
@skipUnlessDBFeature("gis_enabled")
class GeoFeedTest(TestCase):
    """Exercise the GeoRSS / W3C Geo syndication feeds wired up in
    gis_tests.geoapp.urls against the City fixture data."""
    fixtures = ['initial']
    def setUp(self):
        # The syndication framework requires a Site row matching SITE_ID.
        Site(id=settings.SITE_ID, domain="example.com", name="example.com").save()
    def assertChildNodes(self, elem, expected):
        "Taken from syndication/tests.py."
        # Compares the *set* of child node names -- order and duplicates
        # are deliberately ignored.
        actual = set(n.nodeName for n in elem.childNodes)
        expected = set(expected)
        self.assertEqual(actual, expected)
    def test_geofeed_rss(self):
        "Tests geographic feeds using GeoRSS over RSSv2."
        # Uses `GEOSGeometry` in `item_geometry`
        doc1 = minidom.parseString(self.client.get('/feeds/rss1/').content)
        # Uses a 2-tuple in `item_geometry`
        doc2 = minidom.parseString(self.client.get('/feeds/rss2/').content)
        feed1, feed2 = doc1.firstChild, doc2.firstChild
        # Making sure the box got added to the second GeoRSS feed.
        self.assertChildNodes(feed2.getElementsByTagName('channel')[0],
            ['title', 'link', 'description', 'language',
             'lastBuildDate', 'item', 'georss:box', 'atom:link']
        )
        # Incrementing through the feeds.
        for feed in [feed1, feed2]:
            # Ensuring the georss namespace was added to the <rss> element.
            self.assertEqual(feed.getAttribute('xmlns:georss'), 'http://www.georss.org/georss')
            chan = feed.getElementsByTagName('channel')[0]
            items = chan.getElementsByTagName('item')
            # One <item> per City fixture row.
            self.assertEqual(len(items), City.objects.count())
            # Ensuring the georss element was added to each item in the feed.
            for item in items:
                self.assertChildNodes(item, ['title', 'link', 'description', 'guid', 'georss:point'])
    def test_geofeed_atom(self):
        "Testing geographic feeds using GeoRSS over Atom."
        doc1 = minidom.parseString(self.client.get('/feeds/atom1/').content)
        doc2 = minidom.parseString(self.client.get('/feeds/atom2/').content)
        feed1, feed2 = doc1.firstChild, doc2.firstChild
        # Making sure the box got added to the second GeoRSS feed.
        self.assertChildNodes(feed2, ['title', 'link', 'id', 'updated', 'entry', 'georss:box'])
        for feed in [feed1, feed2]:
            # Ensuring the georsss namespace was added to the <feed> element.
            self.assertEqual(feed.getAttribute('xmlns:georss'), 'http://www.georss.org/georss')
            entries = feed.getElementsByTagName('entry')
            self.assertEqual(len(entries), City.objects.count())
            # Ensuring the georss element was added to each entry in the feed.
            for entry in entries:
                self.assertChildNodes(entry, ['title', 'link', 'id', 'summary', 'georss:point'])
    def test_geofeed_w3c(self):
        "Testing geographic feeds using W3C Geo."
        doc = minidom.parseString(self.client.get('/feeds/w3cgeo1/').content)
        feed = doc.firstChild
        # Ensuring the geo namespace was added to the <feed> element.
        self.assertEqual(feed.getAttribute('xmlns:geo'), 'http://www.w3.org/2003/01/geo/wgs84_pos#')
        chan = feed.getElementsByTagName('channel')[0]
        items = chan.getElementsByTagName('item')
        self.assertEqual(len(items), City.objects.count())
        # Ensuring the geo:lat and geo:lon element was added to each item in the feed.
        for item in items:
            self.assertChildNodes(item, ['title', 'link', 'description', 'guid', 'geo:lat', 'geo:lon'])
        # Boxes and Polygons aren't allowed in W3C Geo feeds.
        self.assertRaises(ValueError, self.client.get, '/feeds/w3cgeo2/')  # Box in <channel>
| bsd-3-clause |
jcftang/ansible-modules-extras | monitoring/logicmonitor.py | 21 | 76208 | #!/usr/bin/python
"""LogicMonitor Ansible module for managing Collectors, Hosts and Hostgroups
Copyright (C) 2015 LogicMonitor
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software Foundation,
Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA"""
import datetime
import os
import platform
import socket
import sys
import types
import urllib
# Locate a usable JSON implementation. The old third-party "python-json"
# package also installs itself as `json` but its loads/dumps are not plain
# functions -- that is what the isinstance checks below detect, falling back
# to simplejson in that case. Failure is reported as a JSON blob on stdout
# because Ansible modules must always emit JSON.
HAS_LIB_JSON = True
try:
    import json
    # Detect the python-json library which is incompatible
    # Look for simplejson if that's the case
    try:
        if (
            not isinstance(json.loads, types.FunctionType) or
            not isinstance(json.dumps, types.FunctionType)
        ):
            raise ImportError
    except AttributeError:
        raise ImportError
except ImportError:
    try:
        import simplejson as json
    except ImportError:
        print(
            '\n{"msg": "Error: ansible requires the stdlib json or ' +
            'simplejson module, neither was found!", "failed": true}'
        )
        HAS_LIB_JSON = False
    except SyntaxError:
        # simplejson built for a different Python version raises
        # SyntaxError at import time rather than ImportError.
        print(
            '\n{"msg": "SyntaxError: probably due to installed simplejson ' +
            'being for a different python version", "failed": true}'
        )
        HAS_LIB_JSON = False
RETURN = '''
---
success:
description: flag indicating that execution was successful
returned: success
type: boolean
sample: True
...
'''
DOCUMENTATION = '''
---
module: logicmonitor
short_description: Manage your LogicMonitor account through Ansible Playbooks
description:
- LogicMonitor is a hosted, full-stack, infrastructure monitoring platform.
- This module manages hosts, host groups, and collectors within your LogicMonitor account.
version_added: "2.2"
author: Ethan Culler-Mayeno, Jeff Wozniak
notes:
- You must have an existing LogicMonitor account for this module to function.
requirements: ["An existing LogicMonitor account", "Linux"]
options:
target:
description:
- The type of LogicMonitor object you wish to manage.
- "Collector: Perform actions on a LogicMonitor collector"
- NOTE You should use Ansible service modules such as 'service' or 'supervisorctl' for managing the Collector 'logicmonitor-agent' and 'logicmonitor-watchdog' services. Specifically, you'll probably want to start these services after a Collector add and stop these services before a Collector remove.
- "Host: Perform actions on a host device"
- "Hostgroup: Perform actions on a LogicMonitor host group"
- NOTE Host and Hostgroup tasks should always be performed via local_action. There are no benefits to running these tasks on the remote host and doing so will typically cause problems.
required: true
default: null
choices: ['collector', 'host', 'datsource', 'hostgroup']
action:
description:
- The action you wish to perform on target
- "Add: Add an object to your LogicMonitor account"
- "Remove: Remove an object from your LogicMonitor account"
- "Update: Update properties, description, or groups (target=host) for an object in your LogicMonitor account"
- "SDT: Schedule downtime for an object in your LogicMonitor account"
required: true
default: null
choices: ['add', 'remove', 'update', 'sdt']
company:
description:
- The LogicMonitor account company name. If you would log in to your account at "superheroes.logicmonitor.com" you would use "superheroes"
required: true
default: null
user:
description:
- A LogicMonitor user name. The module will authenticate and perform actions on behalf of this user
required: true
default: null
password:
description:
- The password of the specified LogicMonitor user
required: true
default: null
collector:
description:
- The fully qualified domain name of a collector in your LogicMonitor account.
- This is required for the creation of a LogicMonitor host (target=host action=add)
- This is required for updating, removing or scheduling downtime for hosts if 'displayname' isn't specified (target=host action=update action=remove action=sdt)
required: false
default: null
hostname:
description:
- The hostname of a host in your LogicMonitor account, or the desired hostname of a device to manage.
- Optional for managing hosts (target=host)
required: false
default: 'hostname -f'
displayname:
description:
- The display name of a host in your LogicMonitor account or the desired display name of a device to manage.
- Optional for managing hosts (target=host)
required: false
default: 'hostname -f'
description:
description:
- The long text description of the object in your LogicMonitor account
- Optional for managing hosts and host groups (target=host or target=hostgroup; action=add or action=update)
required: false
default: ""
properties:
description:
- A dictionary of properties to set on the LogicMonitor host or host group.
- Optional for managing hosts and host groups (target=host or target=hostgroup; action=add or action=update)
      - This parameter will add or update existing properties in your LogicMonitor account.
required: false
default: {}
groups:
description:
- A list of groups that the host should be a member of.
- Optional for managing hosts (target=host; action=add or action=update)
required: false
default: []
id:
description:
- ID of the datasource to target
- Required for management of LogicMonitor datasources (target=datasource)
required: false
default: null
fullpath:
description:
- The fullpath of the host group object you would like to manage
- Recommend running on a single Ansible host
- Required for management of LogicMonitor host groups (target=hostgroup)
required: false
default: null
alertenable:
description:
- A boolean flag to turn alerting on or off for an object
- Optional for managing all hosts (action=add or action=update)
required: false
default: true
choices: [true, false]
starttime:
description:
- The time that the Scheduled Down Time (SDT) should begin
- Optional for managing SDT (action=sdt)
- Y-m-d H:M
required: false
default: Now
duration:
description:
- The duration (minutes) of the Scheduled Down Time (SDT)
- Optional for putting an object into SDT (action=sdt)
required: false
default: 30
...
'''
EXAMPLES = '''
# example of adding a new LogicMonitor collector to these devices
---
- hosts: collectors
remote_user: '{{ username }}'
vars:
company: 'mycompany'
user: 'myusername'
password: 'mypassword'
tasks:
- name: Deploy/verify LogicMonitor collectors
become: yes
logicmonitor:
target=collector
action=add
company={{ company }}
user={{ user }}
password={{ password }}
#example of adding a list of hosts into monitoring
---
- hosts: hosts
remote_user: '{{ username }}'
vars:
company: 'mycompany'
user: 'myusername'
password: 'mypassword'
tasks:
- name: Deploy LogicMonitor Host
# All tasks except for target=collector should use local_action
local_action: >
logicmonitor
target=host
action=add
collector='mycompany-Collector'
company='{{ company }}'
user='{{ user }}'
password='{{ password }}'
groups="/servers/production,/datacenter1"
properties="{'snmp.community':'secret','dc':'1', 'type':'prod'}"
#example of putting a datasource in SDT
---
- hosts: localhost
remote_user: '{{ username }}'
vars:
company: 'mycompany'
user: 'myusername'
password: 'mypassword'
tasks:
- name: SDT a datasource
# All tasks except for target=collector should use local_action
local_action: >
logicmonitor
target=datasource
action=sdt
id='123'
duration=3000
starttime='2017-03-04 05:06'
company='{{ company }}'
user='{{ user }}'
password='{{ password }}'
#example of creating a hostgroup
---
- hosts: localhost
remote_user: '{{ username }}'
vars:
company: 'mycompany'
user: 'myusername'
password: 'mypassword'
tasks:
- name: Create a host group
# All tasks except for target=collector should use local_action
local_action: >
logicmonitor
target=hostgroup
action=add
fullpath='/servers/development'
company='{{ company }}'
user='{{ user }}'
password='{{ password }}'
properties="{'snmp.community':'commstring', 'type':'dev'}"
#example of putting a list of hosts into SDT
---
- hosts: hosts
remote_user: '{{ username }}'
vars:
company: 'mycompany'
user: 'myusername'
password: 'mypassword'
tasks:
- name: SDT hosts
# All tasks except for target=collector should use local_action
local_action: >
logicmonitor
target=host
action=sdt
duration=3000
starttime='2016-11-10 09:08'
company='{{ company }}'
user='{{ user }}'
password='{{ password }}'
collector='mycompany-Collector'
#example of putting a host group in SDT
---
- hosts: localhost
remote_user: '{{ username }}'
vars:
company: 'mycompany'
user: 'myusername'
password: 'mypassword'
tasks:
- name: SDT a host group
# All tasks except for target=collector should use local_action
local_action: >
logicmonitor
target=hostgroup
action=sdt
fullpath='/servers/development'
duration=3000
starttime='2017-03-04 05:06'
company='{{ company }}'
user='{{ user }}'
password='{{ password }}'
#example of updating a list of hosts
---
- hosts: hosts
remote_user: '{{ username }}'
vars:
company: 'mycompany'
user: 'myusername'
password: 'mypassword'
tasks:
- name: Update a list of hosts
# All tasks except for target=collector should use local_action
local_action: >
logicmonitor
target=host
action=update
company='{{ company }}'
user='{{ user }}'
password='{{ password }}'
collector='mycompany-Collector'
groups="/servers/production,/datacenter5"
properties="{'snmp.community':'commstring','dc':'5'}"
#example of updating a hostgroup
---
- hosts: hosts
remote_user: '{{ username }}'
vars:
company: 'mycompany'
user: 'myusername'
password: 'mypassword'
tasks:
- name: Update a host group
# All tasks except for target=collector should use local_action
local_action: >
logicmonitor
target=hostgroup
action=update
fullpath='/servers/development'
company='{{ company }}'
user='{{ user }}'
password='{{ password }}'
properties="{'snmp.community':'hg', 'type':'dev', 'status':'test'}"
#example of removing a list of hosts from monitoring
---
- hosts: hosts
remote_user: '{{ username }}'
vars:
company: 'mycompany'
user: 'myusername'
password: 'mypassword'
tasks:
- name: Remove LogicMonitor hosts
# All tasks except for target=collector should use local_action
local_action: >
logicmonitor
target=host
action=remove
company='{{ company }}'
user='{{ user }}'
password='{{ password }}'
collector='mycompany-Collector'
#example of removing a host group
---
- hosts: hosts
remote_user: '{{ username }}'
vars:
company: 'mycompany'
user: 'myusername'
password: 'mypassword'
tasks:
- name: Remove LogicMonitor development servers hostgroup
# All tasks except for target=collector should use local_action
local_action: >
logicmonitor
target=hostgroup
action=remove
company='{{ company }}'
user='{{ user }}'
password='{{ password }}'
fullpath='/servers/development'
- name: Remove LogicMonitor servers hostgroup
# All tasks except for target=collector should use local_action
local_action: >
logicmonitor
target=hostgroup
action=remove
company='{{ company }}'
user='{{ user }}'
password='{{ password }}'
fullpath='/servers'
- name: Remove LogicMonitor datacenter1 hostgroup
# All tasks except for target=collector should use local_action
local_action: >
logicmonitor
target=hostgroup
action=remove
company='{{ company }}'
user='{{ user }}'
password='{{ password }}'
fullpath='/datacenter1'
- name: Remove LogicMonitor datacenter5 hostgroup
# All tasks except for target=collector should use local_action
local_action: >
logicmonitor
target=hostgroup
action=remove
company='{{ company }}'
user='{{ user }}'
password='{{ password }}'
fullpath='/datacenter5'
### example of removing a new LogicMonitor collector to these devices
---
- hosts: collectors
remote_user: '{{ username }}'
vars:
company: 'mycompany'
user: 'myusername'
password: 'mypassword'
tasks:
- name: Remove LogicMonitor collectors
become: yes
logicmonitor:
target=collector
action=remove
company={{ company }}
user={{ user }}
password={{ password }}
#complete example
---
- hosts: localhost
remote_user: '{{ username }}'
vars:
company: 'mycompany'
user: 'myusername'
password: 'mypassword'
tasks:
- name: Create a host group
local_action: >
logicmonitor
target=hostgroup
action=add
fullpath='/servers/production/database'
company='{{ company }}'
user='{{ user }}'
password='{{ password }}'
properties="{'snmp.community':'commstring'}"
- name: SDT a host group
local_action: >
logicmonitor
target=hostgroup
action=sdt
fullpath='/servers/production/web'
duration=3000
starttime='2012-03-04 05:06'
company='{{ company }}'
user='{{ user }}'
password='{{ password }}'
- hosts: collectors
remote_user: '{{ username }}'
vars:
company: 'mycompany'
user: 'myusername'
password: 'mypassword'
tasks:
- name: Deploy/verify LogicMonitor collectors
logicmonitor:
target: collector
action: add
company: {{ company }}
user: {{ user }}
password: {{ password }}
- name: Place LogicMonitor collectors into 30 minute Scheduled downtime
logicmonitor: target=collector action=sdt company={{ company }}
user={{ user }} password={{ password }}
- name: Deploy LogicMonitor Host
local_action: >
logicmonitor
target=host
action=add
collector=agent1.ethandev.com
company='{{ company }}'
user='{{ user }}'
password='{{ password }}'
properties="{'snmp.community':'commstring', 'dc':'1'}"
groups="/servers/production/collectors, /datacenter1"
- hosts: database-servers
remote_user: '{{ username }}'
vars:
company: 'mycompany'
user: 'myusername'
password: 'mypassword'
tasks:
- name: deploy logicmonitor hosts
local_action: >
logicmonitor
target=host
action=add
collector=monitoring.dev.com
company='{{ company }}'
user='{{ user }}'
password='{{ password }}'
properties="{'snmp.community':'commstring', 'type':'db', 'dc':'1'}"
groups="/servers/production/database, /datacenter1"
- name: schedule 5 hour downtime for 2012-11-10 09:08
local_action: >
logicmonitor
target=host
action=sdt
duration=3000
starttime='2012-11-10 09:08'
company='{{ company }}'
user='{{ user }}'
password='{{ password }}'
'''
class LogicMonitor(object):
def __init__(self, module, **params):
self.__version__ = "1.0-python"
self.module = module
self.module.debug("Instantiating LogicMonitor object")
self.check_mode = False
self.company = params["company"]
self.user = params["user"]
self.password = params["password"]
self.fqdn = socket.getfqdn()
self.lm_url = "logicmonitor.com/santaba"
self.__version__ = self.__version__ + "-ansible-module"
def rpc(self, action, params):
"""Make a call to the LogicMonitor RPC library
and return the response"""
self.module.debug("Running LogicMonitor.rpc")
param_str = urllib.urlencode(params)
creds = urllib.urlencode(
{"c": self.company,
"u": self.user,
"p": self.password})
if param_str:
param_str = param_str + "&"
param_str = param_str + creds
try:
url = ("https://" + self.company + "." + self.lm_url +
"/rpc/" + action + "?" + param_str)
# Set custom LogicMonitor header with version
headers = {"X-LM-User-Agent": self.__version__}
# Set headers
f = open_url(url, headers=headers)
raw = f.read()
resp = json.loads(raw)
if resp["status"] == 403:
self.module.debug("Authentication failed.")
self.fail(msg="Error: " + resp["errmsg"])
else:
return raw
except IOError:
self.fail(msg="Error: Unknown exception making RPC call")
def do(self, action, params):
"""Make a call to the LogicMonitor
server \"do\" function"""
self.module.debug("Running LogicMonitor.do...")
param_str = urllib.urlencode(params)
creds = (urllib.urlencode(
{"c": self.company,
"u": self.user,
"p": self.password}))
if param_str:
param_str = param_str + "&"
param_str = param_str + creds
try:
self.module.debug("Attempting to open URL: " +
"https://" + self.company + "." + self.lm_url +
"/do/" + action + "?" + param_str)
f = open_url(
"https://" + self.company + "." + self.lm_url +
"/do/" + action + "?" + param_str)
return f.read()
except IOError:
# self.module.debug("Error opening URL. " + ioe)
self.fail("Unknown exception opening URL")
def get_collectors(self):
"""Returns a JSON object containing a list of
LogicMonitor collectors"""
self.module.debug("Running LogicMonitor.get_collectors...")
self.module.debug("Making RPC call to 'getAgents'")
resp = self.rpc("getAgents", {})
resp_json = json.loads(resp)
if resp_json["status"] is 200:
self.module.debug("RPC call succeeded")
return resp_json["data"]
else:
self.fail(msg=resp)
def get_host_by_hostname(self, hostname, collector):
"""Returns a host object for the host matching the
specified hostname"""
self.module.debug("Running LogicMonitor.get_host_by_hostname...")
self.module.debug("Looking for hostname " + hostname)
self.module.debug("Making RPC call to 'getHosts'")
hostlist_json = json.loads(self.rpc("getHosts", {"hostGroupId": 1}))
if collector:
if hostlist_json["status"] == 200:
self.module.debug("RPC call succeeded")
hosts = hostlist_json["data"]["hosts"]
self.module.debug(
"Looking for host matching: hostname " + hostname +
" and collector " + str(collector["id"]))
for host in hosts:
if (host["hostName"] == hostname and
host["agentId"] == collector["id"]):
self.module.debug("Host match found")
return host
self.module.debug("No host match found")
return None
else:
self.module.debug("RPC call failed")
self.module.debug(hostlist_json)
else:
self.module.debug("No collector specified")
return None
def get_host_by_displayname(self, displayname):
"""Returns a host object for the host matching the
specified display name"""
self.module.debug("Running LogicMonitor.get_host_by_displayname...")
self.module.debug("Looking for displayname " + displayname)
self.module.debug("Making RPC call to 'getHost'")
host_json = (json.loads(self.rpc("getHost",
{"displayName": displayname})))
if host_json["status"] == 200:
self.module.debug("RPC call succeeded")
return host_json["data"]
else:
self.module.debug("RPC call failed")
self.module.debug(host_json)
return None
def get_collector_by_description(self, description):
"""Returns a JSON collector object for the collector
matching the specified FQDN (description)"""
self.module.debug(
"Running LogicMonitor.get_collector_by_description..."
)
collector_list = self.get_collectors()
if collector_list is not None:
self.module.debug("Looking for collector with description {0}" +
description)
for collector in collector_list:
if collector["description"] == description:
self.module.debug("Collector match found")
return collector
self.module.debug("No collector match found")
return None
def get_group(self, fullpath):
"""Returns a JSON group object for the group matching the
specified path"""
self.module.debug("Running LogicMonitor.get_group...")
self.module.debug("Making RPC call to getHostGroups")
resp = json.loads(self.rpc("getHostGroups", {}))
if resp["status"] == 200:
self.module.debug("RPC called succeeded")
groups = resp["data"]
self.module.debug("Looking for group matching " + fullpath)
for group in groups:
if group["fullPath"] == fullpath.lstrip('/'):
self.module.debug("Group match found")
return group
self.module.debug("No group match found")
return None
else:
self.module.debug("RPC call failed")
self.module.debug(resp)
return None
def create_group(self, fullpath):
"""Recursively create a path of host groups.
Returns the id of the newly created hostgroup"""
self.module.debug("Running LogicMonitor.create_group...")
res = self.get_group(fullpath)
if res:
self.module.debug("Group {0} exists." + fullpath)
return res["id"]
if fullpath == "/":
self.module.debug("Specified group is root. Doing nothing.")
return 1
else:
self.module.debug("Creating group named " + fullpath)
self.module.debug("System changed")
self.change = True
if self.check_mode:
self.exit(changed=True)
parentpath, name = fullpath.rsplit('/', 1)
parentgroup = self.get_group(parentpath)
parentid = 1
if parentpath == "":
parentid = 1
elif parentgroup:
parentid = parentgroup["id"]
else:
parentid = self.create_group(parentpath)
h = None
# Determine if we're creating a group from host or hostgroup class
if hasattr(self, '_build_host_group_hash'):
h = self._build_host_group_hash(
fullpath,
self.description,
self.properties,
self.alertenable)
h["name"] = name
h["parentId"] = parentid
else:
h = {"name": name,
"parentId": parentid,
"alertEnable": True,
"description": ""}
self.module.debug("Making RPC call to 'addHostGroup'")
resp = json.loads(
self.rpc("addHostGroup", h))
if resp["status"] == 200:
self.module.debug("RPC call succeeded")
return resp["data"]["id"]
elif resp["errmsg"] == "The record already exists":
self.module.debug("The hostgroup already exists")
group = self.get_group(fullpath)
return group["id"]
else:
self.module.debug("RPC call failed")
self.fail(
msg="Error: unable to create new hostgroup \"" +
name + "\".\n" + resp["errmsg"])
def fail(self, msg):
self.module.fail_json(msg=msg, changed=self.change, failed=True)
def exit(self, changed):
self.module.debug("Changed: " + changed)
self.module.exit_json(changed=changed, success=True)
def output_info(self, info):
self.module.debug("Registering properties as Ansible facts")
self.module.exit_json(changed=False, ansible_facts=info)
class Collector(LogicMonitor):
    def __init__(self, params, module=None):
        """Initializer for the LogicMonitor Collector object"""
        # Track whether this run changed anything (reported by exit/fail).
        self.change = False
        self.params = params
        LogicMonitor.__init__(self, module, **params)
        self.module.debug("Instantiating Collector object")
        # The collector description defaults to this machine's FQDN.
        if self.params['description']:
            self.description = self.params['description']
        else:
            self.description = self.fqdn
        # Look up this collector via the API (None if not yet registered).
        self.info = self._get()
        self.installdir = "/usr/local/logicmonitor"
        self.platform = platform.system()
        # True on a 64-bit interpreter; selects the installer architecture.
        self.is_64bits = sys.maxsize > 2**32
        # SDT (scheduled downtime) window settings from the task params.
        self.duration = self.params['duration']
        self.starttime = self.params['starttime']
        if self.info is None:
            self.id = None
        else:
            self.id = self.info["id"]
    def create(self):
        """Idempotent function to make sure that there is
        a running collector installed and registered"""
        self.module.debug("Running Collector.create...")
        # Register the collector with the API, then fetch and run the
        # installer; each helper is itself idempotent.
        self._create()
        self.get_installer_binary()
        self.install()
    def remove(self):
        """Idempotent function to make sure that there is
        not a running collector installed and registered"""
        self.module.debug("Running Collector.destroy...")
        # NOTE(review): "_unreigster" looks misspelled ("_unregister");
        # the helper must be defined under this exact name elsewhere in
        # this file -- confirm before renaming either side.
        self._unreigster()
        self.uninstall()
def get_installer_binary(self):
"""Download the LogicMonitor collector installer binary"""
self.module.debug("Running Collector.get_installer_binary...")
arch = 32
if self.is_64bits:
self.module.debug("64 bit system")
arch = 64
else:
self.module.debug("32 bit system")
if self.platform == "Linux" and self.id is not None:
self.module.debug("Platform is Linux")
self.module.debug("Agent ID is " + str(self.id))
installfilepath = (self.installdir +
"/logicmonitorsetup" +
str(self.id) + "_" + str(arch) +
".bin")
self.module.debug("Looking for existing installer at " +
installfilepath)
if not os.path.isfile(installfilepath):
self.module.debug("No previous installer found")
self.module.debug("System changed")
self.change = True
if self.check_mode:
self.exit(changed=True)
self.module.debug("Downloading installer file")
# attempt to create the install dir before download
self.module.run_command("mkdir " + self.installdir)
try:
f = open(installfilepath, "w")
installer = (self.do("logicmonitorsetup",
{"id": self.id,
"arch": arch}))
f.write(installer)
f.closed
except:
self.fail(msg="Unable to open installer file for writing")
f.closed
else:
self.module.debug("Collector installer already exists")
return installfilepath
elif self.id is None:
self.fail(
msg="Error: There is currently no collector " +
"associated with this device. To download " +
" the installer, first create a collector " +
"for this device.")
elif self.platform != "Linux":
self.fail(
msg="Error: LogicMonitor Collector must be " +
"installed on a Linux device.")
else:
self.fail(
msg="Error: Unable to retrieve the installer from the server")
    def install(self):
        """Execute the LogicMonitor installer if not
        already installed"""
        self.module.debug("Running Collector.install...")
        if self.platform == "Linux":
            self.module.debug("Platform is Linux")
            # Ensure the installer binary is on disk (downloads if needed).
            installer = self.get_installer_binary()
            if self.info is None:
                self.module.debug("Retriving collector information")
                self.info = self._get()
            # The presence of the agent directory marks a prior install.
            if not os.path.exists(self.installdir + "/agent"):
                self.module.debug("System changed")
                self.change = True
                if self.check_mode:
                    self.exit(changed=True)
                self.module.debug("Setting installer file permissions")
                os.chmod(installer, 484)  # decimal for 0o744
                self.module.debug("Executing installer")
                # -y runs the installer non-interactively.
                ret_code, out, err = self.module.run_command(installer + " -y")
                if ret_code != 0:
                    self.fail(msg="Error: Unable to install collector: " + err)
                else:
                    self.module.debug("Collector installed successfully")
            else:
                self.module.debug("Collector already installed")
        else:
            self.fail(
                msg="Error: LogicMonitor Collector must be " +
                "installed on a Linux device")
    def uninstall(self):
        """Uninstall LogicMontitor collector from the system"""
        self.module.debug("Running Collector.uninstall...")
        uninstallfile = self.installdir + "/agent/bin/uninstall.pl"
        if os.path.isfile(uninstallfile):
            self.module.debug("Collector uninstall file exists")
            self.module.debug("System changed")
            self.change = True
            if self.check_mode:
                self.exit(changed=True)
            self.module.debug("Running collector uninstaller")
            ret_code, out, err = self.module.run_command(uninstallfile)
            if ret_code != 0:
                self.fail(
                    msg="Error: Unable to uninstall collector: " + err)
            else:
                self.module.debug("Collector successfully uninstalled")
        else:
            # No uninstaller but an agent directory exists: the install
            # can't be removed cleanly, so fail rather than guess.
            if os.path.exists(self.installdir + "/agent"):
                (self.fail(
                    msg="Unable to uninstall LogicMonitor " +
                    "Collector. Can not find LogicMonitor " +
                    "uninstaller."))
    def sdt(self):
        """Create a scheduled down time
        (maintenance window) for this collector.

        Uses ``self.starttime`` (``'%Y-%m-%d %H:%M'``) when given, otherwise
        "now" in the account's local time (UTC shifted by the account's
        timezone offset from the ``getTimeZoneSetting`` RPC).  Duration is
        ``self.duration`` minutes.  Honors check mode.
        """
        self.module.debug("Running Collector.sdt...")
        self.module.debug("System changed")
        self.change = True
        if self.check_mode:
            self.exit(changed=True)
        duration = self.duration
        starttime = self.starttime
        offsetstart = starttime
        if starttime:
            self.module.debug("Start time specified")
            start = datetime.datetime.strptime(starttime, '%Y-%m-%d %H:%M')
            offsetstart = start
        else:
            self.module.debug("No start time specified. Using default.")
            start = datetime.datetime.utcnow()
            # Use user UTC offset
            self.module.debug("Making RPC call to 'getTimeZoneSetting'")
            accountresp = json.loads(self.rpc("getTimeZoneSetting", {}))
            if accountresp["status"] == 200:
                self.module.debug("RPC call succeeded")
                offset = accountresp["data"]["offset"]
                offsetstart = start + datetime.timedelta(0, offset)
            else:
                self.fail(msg="Error: Unable to retrieve timezone offset")
        offsetend = offsetstart + datetime.timedelta(0, int(duration)*60)
        # month is sent as month-1: the API appears to use 0-based months
        # (Java Calendar style) -- TODO confirm against the LogicMonitor docs.
        h = {"agentId": self.id,
             "type": 1,
             "notifyCC": True,
             "year": offsetstart.year,
             "month": offsetstart.month-1,
             "day": offsetstart.day,
             "hour": offsetstart.hour,
             "minute": offsetstart.minute,
             "endYear": offsetend.year,
             "endMonth": offsetend.month-1,
             "endDay": offsetend.day,
             "endHour": offsetend.hour,
             "endMinute": offsetend.minute}
        self.module.debug("Making RPC call to 'setAgentSDT'")
        resp = json.loads(self.rpc("setAgentSDT", h))
        if resp["status"] == 200:
            self.module.debug("RPC call succeeded")
            return resp["data"]
        else:
            self.module.debug("RPC call failed")
            self.fail(msg=resp["errmsg"])
def site_facts(self):
"""Output current properties information for the Collector"""
self.module.debug("Running Collector.site_facts...")
if self.info:
self.module.debug("Collector exists")
props = self.get_properties(True)
self.output_info(props)
else:
self.fail(msg="Error: Collector doesn't exit.")
def _get(self):
"""Returns a JSON object representing this collector"""
self.module.debug("Running Collector._get...")
collector_list = self.get_collectors()
if collector_list is not None:
self.module.debug("Collectors returned")
for collector in collector_list:
if collector["description"] == self.description:
return collector
else:
self.module.debug("No collectors returned")
return None
def _create(self):
"""Create a new collector in the associated
LogicMonitor account"""
self.module.debug("Running Collector._create...")
if self.platform == "Linux":
self.module.debug("Platform is Linux")
ret = self.info or self._get()
if ret is None:
self.change = True
self.module.debug("System changed")
if self.check_mode:
self.exit(changed=True)
h = {"autogen": True,
"description": self.description}
self.module.debug("Making RPC call to 'addAgent'")
create = (json.loads(self.rpc("addAgent", h)))
if create["status"] is 200:
self.module.debug("RPC call succeeded")
self.info = create["data"]
self.id = create["data"]["id"]
return create["data"]
else:
self.fail(msg=create["errmsg"])
else:
self.info = ret
self.id = ret["id"]
return ret
else:
self.fail(
msg="Error: LogicMonitor Collector must be " +
"installed on a Linux device.")
def _unreigster(self):
"""Delete this collector from the associated
LogicMonitor account"""
self.module.debug("Running Collector._unreigster...")
if self.info is None:
self.module.debug("Retrieving collector information")
self.info = self._get()
if self.info is not None:
self.module.debug("Collector found")
self.module.debug("System changed")
self.change = True
if self.check_mode:
self.exit(changed=True)
self.module.debug("Making RPC call to 'deleteAgent'")
delete = json.loads(self.rpc("deleteAgent",
{"id": self.id}))
if delete["status"] is 200:
self.module.debug("RPC call succeeded")
return delete
else:
# The collector couldn't unregister. Start the service again
self.module.debug("Error unregistering collecting. " +
delete["errmsg"])
self.fail(msg=delete["errmsg"])
else:
self.module.debug("Collector not found")
return None
class Host(LogicMonitor):
    """A monitored device (host) in a LogicMonitor account.

    Wraps the LogicMonitor RPC API for adding, updating, removing,
    scheduling downtime for, and reporting facts about a single host.
    """

    def __init__(self, params, module=None):
        """Initializor for the LogicMonitor host object"""
        self.change = False
        self.params = params
        self.collector = None
        LogicMonitor.__init__(self, module, **self.params)
        self.module.debug("Instantiating Host object")
        if self.params["hostname"]:
            self.module.debug("Hostname is " + self.params["hostname"])
            self.hostname = self.params['hostname']
        else:
            self.module.debug("No hostname specified. Using " + self.fqdn)
            self.hostname = self.fqdn
        if self.params["displayname"]:
            self.module.debug("Display name is " + self.params["displayname"])
            self.displayname = self.params['displayname']
        else:
            self.module.debug("No display name specified. Using " + self.fqdn)
            self.displayname = self.fqdn
        # Attempt to find host information via display name or host name
        self.module.debug("Attempting to find host by displayname " +
                          self.displayname)
        info = self.get_host_by_displayname(self.displayname)
        if info is not None:
            self.module.debug("Host found by displayname")
            # Use the host information to grab the collector description
            # if not provided.
            # BUG FIX: was `not hasattr(self.params, "collector")`, which is
            # always True for a dict and clobbered a user-supplied collector.
            if (self.params["collector"] is None and
               "agentDescription" in info):
                self.module.debug("Setting collector from host response. " +
                                  "Collector " + info["agentDescription"])
                self.params["collector"] = info["agentDescription"]
        else:
            self.module.debug("Host not found by displayname")
        # At this point, a valid collector description is required for success
        # Check that the description exists or fail
        if self.params["collector"]:
            self.module.debug(
                "Collector specified is " +
                self.params["collector"]
            )
            self.collector = (self.get_collector_by_description(
                self.params["collector"]))
        else:
            self.fail(msg="No collector specified.")
        # If the host wasn't found via displayname, attempt by hostname
        if info is None:
            self.module.debug("Attempting to find host by hostname " +
                              self.hostname)
            info = self.get_host_by_hostname(self.hostname, self.collector)
        self.info = info
        self.properties = self.params["properties"]
        self.description = self.params["description"]
        self.starttime = self.params["starttime"]
        self.duration = self.params["duration"]
        self.alertenable = self.params["alertenable"]
        if self.params["groups"] is not None:
            self.groups = self._strip_groups(self.params["groups"])
        else:
            self.groups = None

    def create(self):
        """Idemopotent function to create if missing,
        update if changed, or skip"""
        self.module.debug("Running Host.create...")
        self.update()

    def get_properties(self):
        """Returns a hash of the non-system properties
        associated with this LogicMonitor host, or None when the host
        cannot be found."""
        self.module.debug("Running Host.get_properties...")
        if self.info:
            self.module.debug("Making RPC call to 'getHostProperties'")
            properties_json = (json.loads(self.rpc("getHostProperties",
                                          {'hostId': self.info["id"],
                                           "filterSystemProperties": True})))
            if properties_json["status"] == 200:
                self.module.debug("RPC call succeeded")
                return properties_json["data"]
            else:
                self.module.debug("Error: there was an issue retrieving the " +
                                  "host properties")
                self.module.debug(properties_json["errmsg"])
                self.fail(msg=properties_json["status"])
        else:
            self.module.debug(
                "Unable to find LogicMonitor host which matches " +
                self.displayname + " (" + self.hostname + ")"
            )
            return None

    def set_properties(self, propertyhash):
        """Update the host to have the properties
        contained in the property hash (applied on the next update())."""
        self.module.debug("Running Host.set_properties...")
        self.module.debug("System changed")
        self.change = True
        if self.check_mode:
            self.exit(changed=True)
        self.module.debug("Assigning property hash to host object")
        self.properties = propertyhash

    def add(self):
        """Add this device to monitoring
        in your LogicMonitor account"""
        self.module.debug("Running Host.add...")
        if self.collector and not self.info:
            self.module.debug("Host not registered. Registering.")
            self.module.debug("System changed")
            self.change = True
            if self.check_mode:
                self.exit(changed=True)
            h = self._build_host_hash(
                self.hostname,
                self.displayname,
                self.collector,
                self.description,
                self.groups,
                self.properties,
                self.alertenable)
            self.module.debug("Making RPC call to 'addHost'")
            resp = json.loads(self.rpc("addHost", h))
            if resp["status"] == 200:
                self.module.debug("RPC call succeeded")
                return resp["data"]
            else:
                self.module.debug("RPC call failed")
                self.module.debug(resp)
                return resp["errmsg"]
        elif self.collector is None:
            self.fail(msg="Specified collector doesn't exist")
        else:
            self.module.debug("Host already registered")

    def update(self):
        """This method takes changes made to this host
        and applies them to the corresponding host
        in your LogicMonitor account."""
        self.module.debug("Running Host.update...")
        if self.info:
            self.module.debug("Host already registed")
            if self.is_changed():
                self.module.debug("System changed")
                self.change = True
                if self.check_mode:
                    self.exit(changed=True)
                h = (self._build_host_hash(
                    self.hostname,
                    self.displayname,
                    self.collector,
                    self.description,
                    self.groups,
                    self.properties,
                    self.alertenable))
                h["id"] = self.info["id"]
                h["opType"] = "replace"
                self.module.debug("Making RPC call to 'updateHost'")
                resp = json.loads(self.rpc("updateHost", h))
                if resp["status"] == 200:
                    self.module.debug("RPC call succeeded")
                else:
                    self.module.debug("RPC call failed")
                    self.fail(msg="Error: unable to update the host.")
            else:
                self.module.debug(
                    "Host properties match supplied properties. " +
                    "No changes to make."
                )
                return self.info
        else:
            self.module.debug("Host not registed. Registering")
            self.module.debug("System changed")
            self.change = True
            if self.check_mode:
                self.exit(changed=True)
            return self.add()

    def remove(self):
        """Remove this host from your LogicMonitor account"""
        self.module.debug("Running Host.remove...")
        if self.info:
            self.module.debug("Host registered")
            self.module.debug("System changed")
            self.change = True
            if self.check_mode:
                self.exit(changed=True)
            self.module.debug("Making RPC call to 'deleteHost'")
            resp = json.loads(self.rpc("deleteHost",
                                       {"hostId": self.info["id"],
                                        "deleteFromSystem": True,
                                        "hostGroupId": 1}))
            if resp["status"] == 200:
                self.module.debug(resp)
                self.module.debug("RPC call succeeded")
                return resp
            else:
                self.module.debug("RPC call failed")
                self.module.debug(resp)
                self.fail(msg=resp["errmsg"])
        else:
            self.module.debug("Host not registered")

    def is_changed(self):
        """Return true if the host doesn't
        match the LogicMonitor account"""
        self.module.debug("Running Host.is_changed")
        # Properties managed by LogicMonitor itself; never compared.
        ignore = ['system.categories', 'snmp.version']
        hostresp = self.get_host_by_displayname(self.displayname)
        if hostresp is None:
            hostresp = self.get_host_by_hostname(self.hostname, self.collector)
        if hostresp:
            self.module.debug("Comparing simple host properties")
            if hostresp["alertEnable"] != self.alertenable:
                return True
            if hostresp["description"] != self.description:
                return True
            if hostresp["displayedAs"] != self.displayname:
                return True
            # BUG FIX: was `hasattr(self.collector, "id")`, which is always
            # False for a dict, so collector changes were never detected.
            if (self.collector and
               "id" in self.collector and
               hostresp["agentId"] != self.collector["id"]):
                return True
            self.module.debug("Comparing groups.")
            if self._compare_groups(hostresp) is True:
                return True
            propresp = self.get_properties()
            if propresp:
                self.module.debug("Comparing properties.")
                if self._compare_props(propresp, ignore) is True:
                    return True
            else:
                self.fail(
                    msg="Error: Unknown error retrieving host properties")
            return False
        else:
            self.fail(msg="Error: Unknown error retrieving host information")

    def sdt(self):
        """Create a scheduled down time
        (maintenance window) for this host"""
        self.module.debug("Running Host.sdt...")
        if self.info:
            self.module.debug("System changed")
            self.change = True
            if self.check_mode:
                self.exit(changed=True)
            duration = self.duration
            starttime = self.starttime
            if starttime:
                self.module.debug("Start time specified")
                start = datetime.datetime.strptime(starttime, '%Y-%m-%d %H:%M')
                offsetstart = start
            else:
                self.module.debug("No start time specified. Using default.")
                start = datetime.datetime.utcnow()
                # Use user UTC offset
                self.module.debug("Making RPC call to 'getTimeZoneSetting'")
                accountresp = (json.loads(self.rpc("getTimeZoneSetting", {})))
                if accountresp["status"] == 200:
                    self.module.debug("RPC call succeeded")
                    offset = accountresp["data"]["offset"]
                    offsetstart = start + datetime.timedelta(0, offset)
                else:
                    self.fail(
                        msg="Error: Unable to retrieve timezone offset")
            offsetend = offsetstart + datetime.timedelta(0, int(duration)*60)
            # month-1: the API appears to use 0-based months -- TODO confirm.
            h = {"hostId": self.info["id"],
                 "type": 1,
                 "year": offsetstart.year,
                 "month": offsetstart.month - 1,
                 "day": offsetstart.day,
                 "hour": offsetstart.hour,
                 "minute": offsetstart.minute,
                 "endYear": offsetend.year,
                 "endMonth": offsetend.month - 1,
                 "endDay": offsetend.day,
                 "endHour": offsetend.hour,
                 "endMinute": offsetend.minute}
            self.module.debug("Making RPC call to 'setHostSDT'")
            resp = (json.loads(self.rpc("setHostSDT", h)))
            if resp["status"] == 200:
                self.module.debug("RPC call succeeded")
                return resp["data"]
            else:
                self.module.debug("RPC call failed")
                self.fail(msg=resp["errmsg"])
        else:
            # Fixed message typo: was "doesn't exit."
            self.fail(msg="Error: Host doesn't exist.")

    def site_facts(self):
        """Output current properties information for the Host"""
        self.module.debug("Running Host.site_facts...")
        if self.info:
            self.module.debug("Host exists")
            props = self.get_properties()
            self.output_info(props)
        else:
            # Fixed message typo: was "doesn't exit."
            self.fail(msg="Error: Host doesn't exist.")

    def _build_host_hash(self,
                         hostname,
                         displayname,
                         collector,
                         description,
                         groups,
                         properties,
                         alertenable):
        """Return a properly formatted hash for the
        creation of a host using the rpc function"""
        self.module.debug("Running Host._build_host_hash...")
        h = {}
        h["hostName"] = hostname
        h["displayedAs"] = displayname
        h["alertEnable"] = alertenable
        if collector:
            self.module.debug("Collector property exists")
            h["agentId"] = collector["id"]
        else:
            self.fail(
                msg="Error: No collector found. Unable to build host hash.")
        if description:
            h["description"] = description
        # BUG FIX: was `groups is not None and groups is not []` -- the
        # second test is always True (fresh list object), so an empty list
        # produced hostGroupIds="".  Truthiness is what was intended.
        if groups:
            self.module.debug("Group property exists")
            groupids = ""
            for group in groups:
                groupids = groupids + str(self.create_group(group)) + ","
            h["hostGroupIds"] = groupids.rstrip(',')
        # BUG FIX: same always-True pattern, `properties is not {}`.
        if properties:
            self.module.debug("Properties hash exists")
            propnum = 0
            for key, value in properties.iteritems():
                h["propName" + str(propnum)] = key
                h["propValue" + str(propnum)] = value
                propnum = propnum + 1
        return h

    def _verify_property(self, propname):
        """Check with LogicMonitor server to
        verify property is unchanged"""
        self.module.debug("Running Host._verify_property...")
        if self.info:
            self.module.debug("Host is registered")
            if propname not in self.properties:
                self.module.debug("Property " + propname + " does not exist")
                return False
            else:
                self.module.debug("Property " + propname + " exists")
                h = {"hostId": self.info["id"],
                     "propName0": propname,
                     "propValue0": self.properties[propname]}
                self.module.debug("Making RCP call to 'verifyProperties'")
                resp = json.loads(self.rpc('verifyProperties', h))
                if resp["status"] == 200:
                    self.module.debug("RPC call succeeded")
                    return resp["data"]["match"]
                else:
                    self.fail(
                        msg="Error: unable to get verification " +
                            "from server.\n%s" % resp["errmsg"])
        else:
            self.fail(
                msg="Error: Host doesn't exist. Unable to verify properties")

    def _compare_groups(self, hostresp):
        """Function to compare the host's current
        groups against provided groups.  Returns True on mismatch."""
        self.module.debug("Running Host._compare_groups")
        g = []
        fullpathinids = hostresp["fullPathInIds"]
        self.module.debug("Building list of groups")
        for path in fullpathinids:
            if path != []:
                h = {'hostGroupId': path[-1]}
                hgresp = json.loads(self.rpc("getHostGroup", h))
                if (hgresp["status"] == 200 and
                   hgresp["data"]["appliesTo"] == ""):
                    g.append(path[-1])
        if self.groups is not None:
            self.module.debug("Comparing group lists")
            for group in self.groups:
                groupjson = self.get_group(group)
                if groupjson is None:
                    self.module.debug("Group mismatch. No result.")
                    return True
                elif groupjson['id'] not in g:
                    self.module.debug("Group mismatch. ID doesn't exist.")
                    return True
                else:
                    g.remove(groupjson['id'])
            if g != []:
                self.module.debug("Group mismatch. New ID exists.")
                return True
        self.module.debug("Groups match")
        # Explicit False (previously fell off the end, returning None).
        return False

    def _compare_props(self, propresp, ignore):
        """Function to compare the host's current
        properties against provided properties.  Returns True on mismatch."""
        self.module.debug("Running Host._compare_props...")
        p = {}
        self.module.debug("Creating list of properties")
        for prop in propresp:
            if prop["name"] not in ignore:
                # Masked values ("*******") are verified server-side instead
                # of compared locally.
                if ("*******" in prop["value"] and
                   self._verify_property(prop["name"])):
                    p[prop["name"]] = self.properties[prop["name"]]
                else:
                    p[prop["name"]] = prop["value"]
        self.module.debug("Comparing properties")
        # Iterate provided properties and compare to received properties
        for prop in self.properties:
            if (prop not in p or
               p[prop] != self.properties[prop]):
                self.module.debug("Properties mismatch")
                return True
        self.module.debug("Properties match")
        # Explicit False (previously fell off the end, returning None).
        return False

    def _strip_groups(self, groups):
        """Function to strip whitespace from group list.
        This function provides the user some flexibility when
        formatting group arguments """
        self.module.debug("Running Host._strip_groups...")
        return map(lambda x: x.strip(), groups)
class Datasource(LogicMonitor):
    """A host datasource instance in a LogicMonitor account; supports
    only scheduled-downtime (SDT) creation."""

    def __init__(self, params, module=None):
        """Initializor for the LogicMonitor Datasource object"""
        self.change = False
        self.params = params
        LogicMonitor.__init__(self, module, **params)
        self.module.debug("Instantiating Datasource object")
        # id identifies the hostDataSource this object operates on.
        self.id = self.params["id"]
        self.starttime = self.params["starttime"]
        self.duration = self.params["duration"]

    def sdt(self):
        """Create a scheduled down time
        (maintenance window) for this datasource.

        Uses self.starttime ('%Y-%m-%d %H:%M') when given, otherwise "now"
        shifted by the account's timezone offset.  Honors check mode.
        """
        self.module.debug("Running Datasource.sdt...")
        self.module.debug("System changed")
        self.change = True
        if self.check_mode:
            self.exit(changed=True)
        duration = self.duration
        starttime = self.starttime
        offsetstart = starttime
        if starttime:
            self.module.debug("Start time specified")
            start = datetime.datetime.strptime(starttime, '%Y-%m-%d %H:%M')
            offsetstart = start
        else:
            self.module.debug("No start time specified. Using default.")
            start = datetime.datetime.utcnow()
            # Use user UTC offset
            self.module.debug("Making RPC call to 'getTimeZoneSetting'")
            accountresp = json.loads(self.rpc("getTimeZoneSetting", {}))
            if accountresp["status"] == 200:
                self.module.debug("RPC call succeeded")
                offset = accountresp["data"]["offset"]
                offsetstart = start + datetime.timedelta(0, offset)
            else:
                self.fail(msg="Error: Unable to retrieve timezone offset")
        offsetend = offsetstart + datetime.timedelta(0, int(duration)*60)
        # month-1: the API appears to use 0-based months -- TODO confirm.
        h = {"hostDataSourceId": self.id,
             "type": 1,
             "notifyCC": True,
             "year": offsetstart.year,
             "month": offsetstart.month-1,
             "day": offsetstart.day,
             "hour": offsetstart.hour,
             "minute": offsetstart.minute,
             "endYear": offsetend.year,
             "endMonth": offsetend.month-1,
             "endDay": offsetend.day,
             "endHour": offsetend.hour,
             "endMinute": offsetend.minute}
        self.module.debug("Making RPC call to 'setHostDataSourceSDT'")
        resp = json.loads(self.rpc("setHostDataSourceSDT", h))
        if resp["status"] == 200:
            self.module.debug("RPC call succeeded")
            return resp["data"]
        else:
            self.module.debug("RPC call failed")
            self.fail(msg=resp["errmsg"])
class Hostgroup(LogicMonitor):
    """A host group in a LogicMonitor account.

    Wraps the LogicMonitor RPC API for creating, updating, removing,
    scheduling downtime for, and reporting facts about a host group,
    identified by its full path (e.g. "/parent/child").
    """

    def __init__(self, params, module=None):
        """Initializor for the LogicMonitor host object"""
        self.change = False
        self.params = params
        LogicMonitor.__init__(self, module, **self.params)
        self.module.debug("Instantiating Hostgroup object")
        self.fullpath = self.params["fullpath"]
        self.info = self.get_group(self.fullpath)
        self.properties = self.params["properties"]
        self.description = self.params["description"]
        self.starttime = self.params["starttime"]
        self.duration = self.params["duration"]
        self.alertenable = self.params["alertenable"]

    def create(self):
        """Wrapper for self.update()"""
        self.module.debug("Running Hostgroup.create...")
        self.update()

    def get_properties(self, final=False):
        """Returns a hash of the properties
        associated with this LogicMonitor host group.

        final=True requests the fully-resolved (inherited) property set.
        Returns None when the group does not exist.
        """
        self.module.debug("Running Hostgroup.get_properties...")
        if self.info:
            self.module.debug("Group found")
            self.module.debug("Making RPC call to 'getHostGroupProperties'")
            properties_json = json.loads(self.rpc(
                "getHostGroupProperties",
                {'hostGroupId': self.info["id"],
                 "finalResult": final}))
            if properties_json["status"] == 200:
                self.module.debug("RPC call succeeded")
                return properties_json["data"]
            else:
                self.module.debug("RPC call failed")
                self.fail(msg=properties_json["status"])
        else:
            self.module.debug("Group not found")
            return None

    def set_properties(self, propertyhash):
        """Update the host group to have the properties
        contained in the property hash (applied on the next update())."""
        self.module.debug("Running Hostgroup.set_properties")
        self.module.debug("System changed")
        self.change = True
        if self.check_mode:
            self.exit(changed=True)
        self.module.debug("Assigning property has to host object")
        self.properties = propertyhash

    def add(self):
        """Idempotent function to ensure that the host
        group exists in your LogicMonitor account"""
        self.module.debug("Running Hostgroup.add")
        if self.info is None:
            self.module.debug("Group doesn't exist. Creating.")
            self.module.debug("System changed")
            self.change = True
            if self.check_mode:
                self.exit(changed=True)
            self.create_group(self.fullpath)
            self.info = self.get_group(self.fullpath)
            self.module.debug("Group created")
            return self.info
        else:
            self.module.debug("Group already exists")

    def update(self):
        """Idempotent function to ensure the host group settings
        (alertenable, properties, etc) in the
        LogicMonitor account match the current object."""
        self.module.debug("Running Hostgroup.update")
        if self.info:
            if self.is_changed():
                self.module.debug("System changed")
                self.change = True
                if self.check_mode:
                    self.exit(changed=True)
                h = self._build_host_group_hash(
                    self.fullpath,
                    self.description,
                    self.properties,
                    self.alertenable)
                h["opType"] = "replace"
                # The root group always has id 1 (set in the hash builder).
                if self.fullpath != "/":
                    h["id"] = self.info["id"]
                self.module.debug("Making RPC call to 'updateHostGroup'")
                resp = json.loads(self.rpc("updateHostGroup", h))
                if resp["status"] == 200:
                    self.module.debug("RPC call succeeded")
                    return resp["data"]
                else:
                    self.module.debug("RPC call failed")
                    self.fail(msg="Error: Unable to update the " +
                                  "host.\n" + resp["errmsg"])
            else:
                self.module.debug(
                    "Group properties match supplied properties. " +
                    "No changes to make"
                )
                return self.info
        else:
            self.module.debug("Group doesn't exist. Creating.")
            self.module.debug("System changed")
            self.change = True
            if self.check_mode:
                self.exit(changed=True)
            return self.add()

    def remove(self):
        """Idempotent function to ensure the host group
        does not exist in your LogicMonitor account"""
        self.module.debug("Running Hostgroup.remove...")
        if self.info:
            self.module.debug("Group exists")
            self.module.debug("System changed")
            self.change = True
            if self.check_mode:
                self.exit(changed=True)
            self.module.debug("Making RPC call to 'deleteHostGroup'")
            resp = json.loads(self.rpc("deleteHostGroup",
                                       {"hgId": self.info["id"]}))
            if resp["status"] == 200:
                self.module.debug(resp)
                self.module.debug("RPC call succeeded")
                return resp
            elif resp["errmsg"] == "No such group":
                self.module.debug("Group doesn't exist")
            else:
                self.module.debug("RPC call failed")
                self.module.debug(resp)
                self.fail(msg=resp["errmsg"])
        else:
            self.module.debug("Group doesn't exist")

    def is_changed(self):
        """Return true if the host group doesn't match
        the LogicMonitor account"""
        self.module.debug("Running Hostgroup.is_changed...")
        ignore = []
        group = self.get_group(self.fullpath)
        properties = self.get_properties()
        if properties is not None and group is not None:
            self.module.debug("Comparing simple group properties")
            if (group["alertEnable"] != self.alertenable or
               group["description"] != self.description):
                return True
            p = {}
            self.module.debug("Creating list of properties")
            for prop in properties:
                if prop["name"] not in ignore:
                    # Masked values ("*******") are verified server-side.
                    if ("*******" in prop["value"] and
                       self._verify_property(prop["name"])):
                        p[prop["name"]] = (
                            self.properties[prop["name"]])
                    else:
                        p[prop["name"]] = prop["value"]
            self.module.debug("Comparing properties")
            if set(p) != set(self.properties):
                return True
        else:
            self.module.debug("No property information received")
            return False

    def sdt(self, duration=30, starttime=None):
        """Create a scheduled down time
        (maintenance window) for this host group.

        NOTE(review): the duration/starttime arguments are immediately
        shadowed by self.duration/self.starttime below; they are kept only
        for signature compatibility with existing callers.
        """
        self.module.debug("Running Hostgroup.sdt")
        self.module.debug("System changed")
        self.change = True
        if self.check_mode:
            self.exit(changed=True)
        duration = self.duration
        starttime = self.starttime
        offset = starttime
        if starttime:
            self.module.debug("Start time specified")
            start = datetime.datetime.strptime(starttime, '%Y-%m-%d %H:%M')
            offsetstart = start
        else:
            self.module.debug("No start time specified. Using default.")
            start = datetime.datetime.utcnow()
            # Use user UTC offset
            self.module.debug("Making RPC call to 'getTimeZoneSetting'")
            accountresp = json.loads(self.rpc("getTimeZoneSetting", {}))
            if accountresp["status"] == 200:
                self.module.debug("RPC call succeeded")
                offset = accountresp["data"]["offset"]
                offsetstart = start + datetime.timedelta(0, offset)
            else:
                self.fail(
                    msg="Error: Unable to retrieve timezone offset")
        offsetend = offsetstart + datetime.timedelta(0, int(duration)*60)
        # month-1: the API appears to use 0-based months -- TODO confirm.
        h = {"hostGroupId": self.info["id"],
             "type": 1,
             "year": offsetstart.year,
             "month": offsetstart.month-1,
             "day": offsetstart.day,
             "hour": offsetstart.hour,
             "minute": offsetstart.minute,
             "endYear": offsetend.year,
             "endMonth": offsetend.month-1,
             "endDay": offsetend.day,
             "endHour": offsetend.hour,
             "endMinute": offsetend.minute}
        self.module.debug("Making RPC call to setHostGroupSDT")
        resp = json.loads(self.rpc("setHostGroupSDT", h))
        if resp["status"] == 200:
            self.module.debug("RPC call succeeded")
            return resp["data"]
        else:
            self.module.debug("RPC call failed")
            self.fail(msg=resp["errmsg"])

    def site_facts(self):
        """Output current properties information for the Hostgroup"""
        self.module.debug("Running Hostgroup.site_facts...")
        if self.info:
            self.module.debug("Group exists")
            props = self.get_properties(True)
            self.output_info(props)
        else:
            # Fixed message typo: was "doesn't exit."
            self.fail(msg="Error: Group doesn't exist.")

    def _build_host_group_hash(self,
                               fullpath,
                               description,
                               properties,
                               alertenable):
        """Return a properly formatted hash for the
        creation of a hostgroup using the rpc function"""
        self.module.debug("Running Hostgroup._build_host_hash")
        h = {}
        h["alertEnable"] = alertenable
        if fullpath == "/":
            self.module.debug("Group is root")
            h["id"] = 1
        else:
            self.module.debug("Determining group path")
            parentpath, name = fullpath.rsplit('/', 1)
            parent = self.get_group(parentpath)
            h["name"] = name
            if parent:
                self.module.debug("Parent group " +
                                  str(parent["id"]) + " found.")
                h["parentID"] = parent["id"]
            else:
                self.module.debug("No parent group found. Using root.")
                h["parentID"] = 1
        if description:
            self.module.debug("Description property exists")
            h["description"] = description
        if properties != {}:
            self.module.debug("Properties hash exists")
            propnum = 0
            for key, value in properties.iteritems():
                h["propName" + str(propnum)] = key
                h["propValue" + str(propnum)] = value
                propnum = propnum + 1
        return h

    def _verify_property(self, propname):
        """Check with LogicMonitor server
        to verify property is unchanged"""
        self.module.debug("Running Hostgroup._verify_property")
        if self.info:
            self.module.debug("Group exists")
            if propname not in self.properties:
                self.module.debug("Property " + propname + " does not exist")
                return False
            else:
                self.module.debug("Property " + propname + " exists")
                h = {"hostGroupId": self.info["id"],
                     "propName0": propname,
                     "propValue0": self.properties[propname]}
                self.module.debug("Making RCP call to 'verifyProperties'")
                resp = json.loads(self.rpc('verifyProperties', h))
                if resp["status"] == 200:
                    self.module.debug("RPC call succeeded")
                    return resp["data"]["match"]
                else:
                    self.fail(
                        msg="Error: unable to get verification " +
                            "from server.\n%s" % resp["errmsg"])
        else:
            self.fail(
                msg="Error: Group doesn't exist. Unable to verify properties")
def selector(module):
    """Figure out which object and which action
    to take given the module parameters, then execute the action
    and exit with the resulting changed status."""
    if module.params["target"] == "collector":
        target = Collector(module.params, module)
    elif module.params["target"] == "host":
        # Make sure required parameter collector is specified
        if ((module.params["action"] == "add" or
             module.params["displayname"] is None) and
                module.params["collector"] is None):
            module.fail_json(
                msg="Parameter 'collector' required.")
        target = Host(module.params, module)
    elif module.params["target"] == "datasource":
        # Validate target specific required parameters
        if module.params["id"] is not None:
            # make sure a supported action was specified
            if module.params["action"] == "sdt":
                target = Datasource(module.params, module)
            else:
                errmsg = ("Error: Unexpected action \"" +
                          module.params["action"] + "\" was specified.")
                module.fail_json(msg=errmsg)
        else:
            # BUG FIX: previously this case fell through silently, leaving
            # 'target' unbound and raising NameError below.
            module.fail_json(
                msg="Parameter 'id' required for target 'datasource'")
    elif module.params["target"] == "hostgroup":
        # Validate target specific required parameters
        if module.params["fullpath"] is not None:
            target = Hostgroup(module.params, module)
        else:
            module.fail_json(
                msg="Parameter 'fullpath' required for target 'hostgroup'")
    else:
        module.fail_json(
            msg="Error: Unexpected target \"" + module.params["target"] +
                "\" was specified.")

    # Map the requested action onto the target object's method.
    if module.params["action"].lower() == "add":
        action = target.create
    elif module.params["action"].lower() == "remove":
        action = target.remove
    elif module.params["action"].lower() == "sdt":
        action = target.sdt
    elif module.params["action"].lower() == "update":
        action = target.update
    else:
        errmsg = ("Error: Unexpected action \"" + module.params["action"] +
                  "\" was specified.")
        module.fail_json(msg=errmsg)

    action()
    module.exit_json(changed=target.change)
def main():
    """Ansible module entry point: declare the argument spec and
    delegate all work to selector()."""
    # Valid values for the 'target' and 'action' parameters.
    TARGETS = [
        "collector",
        "host",
        "datasource",
        "hostgroup"]

    ACTIONS = [
        "add",
        "remove",
        "sdt",
        "update"]

    module = AnsibleModule(
        argument_spec=dict(
            target=dict(required=True, default=None, choices=TARGETS),
            action=dict(required=True, default=None, choices=ACTIONS),
            company=dict(required=True, default=None),
            user=dict(required=True, default=None),
            password=dict(required=True, default=None, no_log=True),
            collector=dict(required=False, default=None),
            hostname=dict(required=False, default=None),
            displayname=dict(required=False, default=None),
            id=dict(required=False, default=None),
            description=dict(required=False, default=""),
            fullpath=dict(required=False, default=None),
            starttime=dict(required=False, default=None),
            duration=dict(required=False, default=30),
            properties=dict(required=False, default={}, type="dict"),
            groups=dict(required=False, default=[], type="list"),
            alertenable=dict(required=False, default="true", choices=BOOLEANS)
        ),
        supports_check_mode=True
    )

    # HAS_LIB_JSON is set where the json import is attempted (earlier in
    # this file, outside this block).
    if HAS_LIB_JSON is not True:
        module.fail_json(msg="Unable to load JSON library")

    selector(module)
# Ansible module-utils imports are conventionally placed at the bottom of
# legacy modules (the framework historically injected them here).
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
from ansible.module_utils.urls import open_url

if __name__ == "__main__":
    main()
| gpl-3.0 |
tinchoss/Python_Android | python/src/Lib/idlelib/textView.py | 72 | 3246 | """Simple text browser for IDLE
"""
from Tkinter import *
import tkMessageBox
class TextViewer(Toplevel):
    """A simple modal, read-only text viewer dialog for IDLE.

    Constructing an instance shows the dialog and blocks (wait_window)
    until it is dismissed.
    """
    def __init__(self, parent, title, text):
        """Show the given text in a scrollable window with a 'close' button
        """
        Toplevel.__init__(self, parent)
        self.configure(borderwidth=5)
        # Fixed 625x500 size, offset slightly from the parent window.
        self.geometry("=%dx%d+%d+%d" % (625, 500,
                                        parent.winfo_rootx() + 10,
                                        parent.winfo_rooty() + 10))
        #elguavas - config placeholders til config stuff completed
        self.bg = '#ffffff'
        self.fg = '#000000'

        self.CreateWidgets()
        self.title(title)
        # Modal behavior: tied to parent, grabbing all input.
        self.transient(parent)
        self.grab_set()
        self.protocol("WM_DELETE_WINDOW", self.Ok)
        self.parent = parent
        self.textView.focus_set()
        #key bindings for this dialog
        self.bind('<Return>',self.Ok) #dismiss dialog
        self.bind('<Escape>',self.Ok) #dismiss dialog
        # NOTE(review): index is the float 0.0, not the usual '1.0' string;
        # Tk coerces it, but '1.0' would be the conventional spelling.
        self.textView.insert(0.0, text)
        # Disable editing after inserting, making the viewer read-only.
        self.textView.config(state=DISABLED)
        self.wait_window()

    def CreateWidgets(self):
        """Build the scrollable text area and the Close button."""
        frameText = Frame(self, relief=SUNKEN, height=700)
        frameButtons = Frame(self)
        self.buttonOk = Button(frameButtons, text='Close',
                               command=self.Ok, takefocus=FALSE)
        self.scrollbarView = Scrollbar(frameText, orient=VERTICAL,
                                       takefocus=FALSE, highlightthickness=0)
        self.textView = Text(frameText, wrap=WORD, highlightthickness=0,
                             fg=self.fg, bg=self.bg)
        # Wire the scrollbar and the text widget to each other.
        self.scrollbarView.config(command=self.textView.yview)
        self.textView.config(yscrollcommand=self.scrollbarView.set)
        self.buttonOk.pack()
        self.scrollbarView.pack(side=RIGHT,fill=Y)
        self.textView.pack(side=LEFT,expand=TRUE,fill=BOTH)
        frameButtons.pack(side=BOTTOM,fill=X)
        frameText.pack(side=TOP,expand=TRUE,fill=BOTH)

    def Ok(self, event=None):
        """Dismiss the dialog (bound to Close, Return, Escape, and WM close)."""
        self.destroy()
def view_text(parent, title, text):
    """Display text in a modal TextViewer dialog; blocks until dismissed.

    Returns None (the TextViewer instance is not returned).
    """
    TextViewer(parent, title, text)
def view_file(parent, title, filename, encoding=None):
    """Read filename and display its contents in a modal TextViewer.

    parent -- parent Tk widget
    title -- dialog title
    filename -- path of the file to display
    encoding -- optional text encoding; when given the file is opened
                through codecs.open with that encoding
    On IOError an error message box is shown instead of the viewer.
    """
    try:
        if encoding:
            import codecs
            # Bug fix: the encoding argument was previously dropped, so the
            # file was always decoded with the default codec.
            textFile = codecs.open(filename, 'r', encoding)
        else:
            textFile = open(filename, 'r')
    except IOError:
        import tkMessageBox
        tkMessageBox.showerror(title='File Load Error',
                               message='Unable to load file %r .' % filename,
                               parent=parent)
    else:
        return view_text(parent, title, textFile.read())
if __name__ == '__main__':
    # Manual smoke test: buttons that exercise view_text and view_file.
    root=Tk()
    root.title('textView test')
    filename = './textView.py'
    # Use open() instead of the deprecated file() builtin, and close the
    # handle instead of leaking it.
    fp = open(filename, 'r')
    text = fp.read()
    fp.close()
    btn1 = Button(root, text='view_text',
                  command=lambda:view_text(root, 'view_text', text))
    btn1.pack(side=LEFT)
    btn2 = Button(root, text='view_file',
                  command=lambda:view_file(root, 'view_file', filename))
    btn2.pack(side=LEFT)
    close = Button(root, text='Close', command=root.destroy)
    close.pack(side=RIGHT)
    root.mainloop()
| apache-2.0 |
Gurupradeep/Remy-in-ns3 | bindings/python/rad_util.py | 212 | 26013 | # Copyright (c) 2007 RADLogic
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Provide various handy Python functions.
Running this script directly will execute the doctests.
Functions:
int2bin(i, n) -- Convert integer to binary string.
bin2int(bin_string) -- Convert binary string to integer.
reverse(input_string) -- Reverse a string.
transpose(matrix) -- Transpose a list of lists.
polygon_area(points_list) -- Calculate the area of an arbitrary polygon.
timestamp() -- Return string containing current time stamp.
pt2str(point) -- Return prettier string version of point tuple.
gcf(a, b) -- Return the greatest common factor of two numbers.
lcm(a, b) -- Return the least common multiple of two numbers.
permutations(input_list) -- Generate all permutations of a list of items.
reduce_fraction(fraction) -- Reduce fraction (num, denom) to simplest form.
quantile(l, p) -- Return p quantile of list l. E.g. p=0.25 for q1.
trim(l) -- Discard values in list more than 1.5*IQR outside IQR.
nice_units(value) -- Return value converted to human readable units.
uniquify(seq) -- Return sequence with duplicate items in sequence seq removed.
reverse_dict(d) -- Return the dictionary with the items as keys and vice-versa.
lsb(x, n) -- Return the n least significant bits of x.
gray_encode(i) -- Gray encode the given integer.
random_vec(bits, max_value=None) -- Return a random binary vector.
binary_range(bits) -- Return list of all possible binary numbers width=bits.
float_range([start], stop, [step]) -- Return range of floats.
find_common_fixes(s1, s2) -- Find common (prefix, suffix) of two strings.
is_rotated(seq1, seq2) -- Return true if the list is a rotation of other list.
getmodule(obj) -- Return the module that contains the object definition of obj.
(use inspect.getmodule instead, though)
get_args(argv) -- Store command-line args in a dictionary.
This module requires Python >= 2.2
"""
__author__ = 'Tim Wegener <twegener@radlogic.com.au>'
__date__ = '$Date: 2007/03/27 03:15:06 $'
__version__ = '$Revision: 0.45 $'
__credits__ = """
David Chandler, for polygon area algorithm.
(http://www.davidchandler.com/AreaOfAGeneralPolygon.pdf)
"""
import re
import sys
import time
import random
# Compatibility shim: define True/False on very old Pythons (< 2.2.1)
# that lack the boolean builtins.  No-op on modern interpreters.
try:
    True, False
except NameError:
    True, False = (1==1, 0==1)
def int2bin(i, n):
    """Return non-negative integer i as an n-character binary string.

    Raise ValueError if i has set bits beyond the requested width.

    >>> int2bin(0, 8)
    '00000000'
    >>> int2bin(123, 8)
    '01111011'
    """
    hex2bin = {'0': '0000', '1': '0001', '2': '0010', '3': '0011',
               '4': '0100', '5': '0101', '6': '0110', '7': '0111',
               '8': '1000', '9': '1001', 'a': '1010', 'b': '1011',
               'c': '1100', 'd': '1101', 'e': '1110', 'f': '1111'}
    # Convert to hex and map each digit to its 4-bit binary equivalent.
    # (Stripping 'l' handles the long-integer suffix on old Pythons.)
    hex_digits = hex(i).lower().replace('l', '')[2:]
    bits = ''.join([hex2bin[digit] for digit in hex_digits])
    # Any '1' beyond the lowest n bits means the value does not fit.
    if '1' in bits[:-n]:
        raise ValueError("Value too large for given number of bits.")
    # Truncate to n bits, then zero-pad on the left if shorter.
    return bits[-n:].rjust(n, '0')
def bin2int(bin_string):
    """Convert a binary number string to a decimal integer.

    Note: Python >= 2 provides int(bin_string, 2) directly.

    >>> bin2int('1111')
    15
    >>> bin2int('0101')
    5
    """
    return int(bin_string, 2)
def reverse(input_string):
    """Return input_string reversed (handy for binary number strings).

    >>> reverse('abc')
    'cba'
    """
    return ''.join(reversed(input_string))
def transpose(matrix):
    """Transpose a list of lists.

    Returns a list of lists (fix: under Python 3 the previous
    map()-based version returned a lazy map object).

    >>> transpose([['a', 'b', 'c'], ['d', 'e', 'f'], ['g', 'h', 'i']])
    [['a', 'd', 'g'], ['b', 'e', 'h'], ['c', 'f', 'i']]
    >>> transpose([['a', 'b'], ['d', 'e'], ['g', 'h']])
    [['a', 'd', 'g'], ['b', 'e', 'h']]
    """
    # zip(*matrix) pairs up the i-th elements of every row.
    return [list(row) for row in zip(*matrix)]
def polygon_area(points_list, precision=100):
    """Calculate the area of an arbitrary polygon.

    Return the area as a positive float.

    Arguments:
    points_list -- list of point tuples [(x0, y0), (x1, y1), ...]
                   (unclosed polygons are closed automatically;
                   fix: the input list is no longer mutated in place)
    precision -- scaling factor for internal integer arithmetic

    >>> polygon_area([(0, 0), (0, 1), (1, 1), (1, 2), (2, 2), (2, 0), (0, 0)])
    3.0

    Credits:
    Area of a General Polygon by David Chandler
    http://www.davidchandler.com/AreaOfAGeneralPolygon.pdf
    """
    # Scale coordinates up to integers on a local copy.
    pts = [(int(x * precision), int(y * precision)) for (x, y) in points_list]
    # Close the polygon if it is not closed already.
    if pts[-1] != pts[0]:
        pts.append(pts[0])
    # Shoelace-style signed sum over consecutive point pairs.
    area = 0
    for (x_i, y_i), (x_next, y_next) in zip(pts, pts[1:]):
        area += (x_next * y_i) - (y_next * x_i)
    # Floor division matches the original Python 2 integer `/ 2`.
    area = abs(area // 2)
    # Undo the precision scaling.
    return float(area) / (precision ** 2)
def timestamp():
    """Return the current local time as an asctime-style string.

    Note: equivalent to time.asctime() with no arguments (Python >= 2).
    """
    return time.asctime()
def pt2str(point):
    """Return a pretty '(x, y)' string for a 2-element point tuple.

    >>> pt2str((1.8, 1.9))
    '(1.8, 1.9)'
    """
    x, y = point[0], point[1]
    return "(%s, %s)" % (x, y)
def gcf(a, b, epsilon=1e-16):
    """Return the greatest common factor of a and b (Euclidean algorithm).

    If both numbers are integers the result is an integer, otherwise
    a float.  Floats of magnitude below epsilon are treated as zero.

    >>> gcf(12, 34)
    2
    >>> gcf(13.5, 4)
    0.5
    >>> gcf(-2, 4)
    2
    >>> gcf(5, 0)
    5
    >>> gcf(0, 0)
    0
    """
    larger, smaller = max(a, b), min(a, b)
    # Euclid: repeatedly replace the pair with (smaller, larger mod smaller)
    # until the remainder is (effectively) zero.
    while smaller and abs(smaller) > epsilon:
        larger, smaller = smaller, larger % smaller
    return abs(larger)
def lcm(a, b, precision=None):
    """Return the least common multiple of a and b, using the gcf function.

    Arguments:
    a, b -- two numbers; two integers give an integer result,
            otherwise the result is a float
    precision -- unused; retained for backward compatibility

    By (an arbitrary) definition lcm(0, 0) == 0.

    >>> lcm(21, 6)
    42
    >>> lcm(2.5, 3.5)
    17.5
    """
    denom = gcf(a, b)
    if denom == 0:
        return 0
    # Divide before multiplying to limit intermediate growth.
    # Fix: under Python 3 true division, b / denom made integer inputs
    # produce a float (e.g. lcm(21, 6) == 42.0); floor division is exact
    # here because denom divides b.
    if isinstance(a, int) and isinstance(b, int):
        return a * (b // denom)
    return a * (b / denom)
def permutations(input_list):
    """Return a list of all permutations of input_list (recursively built).

    >>> sorted(permutations(['a', 'b']))
    [['a', 'b'], ['b', 'a']]
    """
    # Base case: zero or one element has exactly one permutation.
    if len(input_list) <= 1:
        return [input_list]
    head, tail = input_list[0], input_list[1:]
    results = []
    # For every permutation of the tail, insert the head at each position.
    for partial in permutations(tail):
        for pos in range(len(input_list)):
            candidate = partial[:]
            candidate.insert(pos, head)
            results.append(candidate)
    return results
def reduce_fraction(fraction):
    """Reduce fraction tuple (num, denom) to simplest form.

    Fix: integer inputs now yield an integer tuple; under Python 3's
    true division the original returned floats, e.g. (2.0, 1.0),
    contradicting the examples below.

    >>> reduce_fraction((14, 7))
    (2, 1)
    >>> reduce_fraction((-2, 4))
    (-1, 2)
    >>> reduce_fraction((0, 4))
    (0, 1)
    >>> reduce_fraction((4, 0))
    (1, 0)
    """
    (numerator, denominator) = fraction
    common_factor = abs(gcf(numerator, denominator))
    if isinstance(numerator, int) and isinstance(denominator, int):
        # Floor division is exact here (common_factor divides both).
        return (numerator // common_factor, denominator // common_factor)
    return (numerator / common_factor, denominator / common_factor)
def quantile(l, p):
    """Return the p quantile of list l (e.g. p=0.25 for Q1).

    Uses linear interpolation between adjacent order statistics.
    See:
    http://rweb.stat.umn.edu/R/library/base/html/quantile.html
    """
    ordered = sorted(l)
    n = len(ordered)
    # 1-based fractional rank of the requested quantile.
    rank = 1 + ((n - 1) * p)
    lower = int(rank)
    frac = rank - lower
    if lower < n:
        # Interpolate between the two neighbouring order statistics.
        return (1 - frac) * ordered[lower - 1] + frac * ordered[lower]
    return ordered[lower - 1]
def trim(l):
    """Return a sorted copy of l with mild outliers discarded.

    Values more than 1.5*IQR outside the inter-quartile range are
    dropped (1.5*IQR marks a mild outlier, 3*IQR an extreme outlier).
    Uses the module's quantile() function.

    See:
    http://wind.cc.whecn.edu/~pwildman/statnew/section_7_-_exploratory_data_analysis.htm
    """
    ordered = sorted(l)
    # Note: the original also computed the median here but never used it;
    # that dead code has been removed.
    q1 = quantile(ordered, 0.25)
    q3 = quantile(ordered, 0.75)
    margin = (q3 - q1) * 1.5
    return [x for x in ordered if q1 - margin <= x <= q3 + margin]
def nice_units(value, dp=0, sigfigs=None, suffix='', space=' ',
               use_extra_prefixes=False, use_full_name=False, mode='si'):
    """Return value converted to a human readable unit-prefixed string.

    Arguments:
    value -- number in base units
    dp -- number of decimal places to display (rounded)
    sigfigs -- number of significant figures to display (rounded);
               overrides dp if set
    suffix -- optional unit suffix to append to the multiplier symbol
    space -- separator between value and multiplier symbol (default: ' ')
    use_extra_prefixes -- also allow hecto, deka, deci and centi
                          (default: False)
    use_full_name -- use the full prefix name, e.g. 'milli' instead of 'm'
                     (default: False)
    mode -- 'si' for SI prefixes, 'bin' for binary multipliers (1024, ...)
            (default: 'si')

    Fixes relative to the original: dict.keys().sort() crashed on
    Python 3 (views have no sort method); the eval()-built format
    strings are replaced with '%.*' formatting; the 2**30 binary
    prefix full name was mislabelled 'mega'.

    SI prefixes from:
    http://physics.nist.gov/cuu/Units/prefixes.html
    (Greek mu changed to u.)
    Binary prefixes based on:
    http://physics.nist.gov/cuu/Units/binary.html

    >>> nice_units(2e-11)
    '20 p'
    >>> nice_units(2e-11, space='')
    '20p'
    """
    si_prefixes = {1e24: ('Y', 'yotta'),
                   1e21: ('Z', 'zetta'),
                   1e18: ('E', 'exa'),
                   1e15: ('P', 'peta'),
                   1e12: ('T', 'tera'),
                   1e9: ('G', 'giga'),
                   1e6: ('M', 'mega'),
                   1e3: ('k', 'kilo'),
                   1e-3: ('m', 'milli'),
                   1e-6: ('u', 'micro'),
                   1e-9: ('n', 'nano'),
                   1e-12: ('p', 'pico'),
                   1e-15: ('f', 'femto'),
                   1e-18: ('a', 'atto'),
                   1e-21: ('z', 'zepto'),
                   1e-24: ('y', 'yocto')
                   }
    if use_extra_prefixes:
        si_prefixes.update({1e2: ('h', 'hecto'),
                            1e1: ('da', 'deka'),
                            1e-1: ('d', 'deci'),
                            1e-2: ('c', 'centi')
                            })
    bin_prefixes = {2 ** 10: ('K', 'kilo'),
                    2 ** 20: ('M', 'mega'),
                    2 ** 30: ('G', 'giga'),
                    2 ** 40: ('T', 'tera'),
                    2 ** 50: ('P', 'peta'),
                    2 ** 60: ('E', 'exa')
                    }
    prefixes = bin_prefixes if mode == 'bin' else si_prefixes
    prefixes[1] = ('', '')  # Unity (no prefix).
    # Determine the appropriate multiplier bracket for value.
    multipliers = sorted(prefixes)
    mult_i = None
    for i in range(len(multipliers) - 1):
        if multipliers[i] <= value < multipliers[i + 1]:
            mult_i = i
            break
    if mult_i is None:
        # Below the smallest, or at/above the largest, multiplier.
        mult_i = 0 if value < multipliers[0] else len(multipliers) - 1
    mult = multipliers[mult_i]
    new_value = value / mult
    if sigfigs is None:
        # If rounding to dp places lands exactly on the next multiplier
        # boundary, step up to that multiplier (e.g. '1000 m' -> '1 ').
        if mult_i < (len(multipliers) - 1) and \
           round(new_value, dp) == round(multipliers[mult_i + 1] / mult, dp):
            mult = multipliers[mult_i + 1]
            new_value = value / mult
        str_value = '%.*f' % (dp, new_value)
    else:
        str_value = '%.*g' % (sigfigs, new_value)
    label_type = 1 if use_full_name else 0
    return str_value + space + prefixes[mult][label_type] + suffix
def uniquify(seq, preserve_order=False):
    """Return a list with duplicate items in sequence seq removed.

    Based on a usenet post by Tim Peters.  O(N) for hashable items,
    O(N**2) for unhashable ones.

    Fix: the unordered hashable path now returns a real list; under
    Python 3 it returned a dict_keys view, which e.g. cannot be sorted
    in place.

    Arguments:
    seq -- sequence
    preserve_order -- keep first-occurrence order (default: False);
                      without it the order is arbitrary

    >>> uniquify(['a', 'aa', 'b', 'b', 'ccc', 'ccc', 'd'], preserve_order=True)
    ['a', 'aa', 'b', 'ccc', 'd']
    >>> uniquify([['z'], ['x'], ['y'], ['z']], preserve_order=True)
    [['z'], ['x'], ['y']]
    """
    try:
        # Fast path: hashable items, dict-based dedup.
        d = {}
        if preserve_order:
            # Dave Kirby's method (f8) from
            # http://www.peterbe.com/plog/uniqifiers-benchmark
            # __setitem__ returns None, so `not ...` is always True.
            return [x for x in seq if (x not in d) and not d.__setitem__(x, 0)]
        for x in seq:
            d[x] = 0
        return list(d.keys())
    except TypeError:
        # Unhashable items: fall back to an O(N**2) membership scan.
        result = []
        for x in seq:
            if x not in result:
                result.append(x)
        return result


# Alias to noun form for backward compatibility.
unique = uniquify
def reverse_dict(d):
    """Return a dict mapping the values of d to its keys.

    Note: results are arbitrary if the values are not unique.

    >>> sorted(reverse_dict({'a': 1, 'b': 2}).items())
    [(1, 'a'), (2, 'b')]
    """
    return dict((value, key) for key, value in d.items())
def lsb(x, n):
    """Return the n least significant bits of x.

    >>> lsb(13, 3)
    5
    """
    mask = (2 ** n) - 1
    return x & mask
def gray_encode(i):
    """Return the Gray code of integer i (i XOR i shifted right by one)."""
    return i ^ (i >> 1)
def random_vec(bits, max_value=None):
    """Return a random binary vector (string) of length bits.

    If max_value is given and smaller than 2**bits - 1, the vector is
    rescaled so its integer value does not exceed max_value.

    Fix: the rescaling used `/` division, which floored to 0 for almost
    all values on Python 2 and produced a float (breaking int2bin/hex)
    on Python 3; it now uses exact integer arithmetic.
    """
    vector = ""
    # Build the vector 10 random bits at a time.
    for _ in range(int(bits / 10) + 1):
        chunk = int((2 ** 10) * random.random())
        vector += int2bin(chunk, 10)
    if max_value and (max_value < 2 ** bits - 1):
        # Scale the integer value into [0, max_value] before truncating.
        scaled = int(vector, 2) * max_value // (2 ** bits - 1)
        vector = int2bin(scaled, bits)
    return vector[0:bits]
def binary_range(bits):
    """Return a list of all binary numbers of width=bits, in order.

    It would be nice to extend it to match the functionality of
    python's range() built-in function.

    >>> binary_range(2)
    ['00', '01', '10', '11']
    """
    # The original hand-rolled ripple-carry counter is equivalent to
    # formatting 0 .. 2**bits - 1 as zero-padded binary strings.
    if bits < 1:
        return []
    return [format(i, '0%db' % bits) for i in range(2 ** bits)]
def float_range(start, stop=None, step=None):
    """Return a list containing an arithmetic progression of floats.

    Like range(), but accepts float increments; stop is excluded.
    With one argument, counts from 0.0 to start with step 1.0.
    """
    if stop is None:
        start, stop = 0.0, float(start)
    if step is None:
        step = 1.0
    values = []
    current = float(start)
    # Accumulate by repeated addition (matches range() semantics; note
    # float rounding can accumulate over many steps).
    while current < stop:
        values.append(current)
        current += step
    return values
def find_common_fixes(s1, s2):
    """Find the common (prefix, suffix) of two strings.

    >>> find_common_fixes('abc', 'def')
    ('', '')
    >>> find_common_fixes('abcelephantdef', 'abccowdef')
    ('abc', 'def')
    >>> find_common_fixes('abcelephantdef', 'abccow')
    ('abc', '')
    >>> find_common_fixes('elephantdef', 'abccowdef')
    ('', 'def')
    """
    limit = min(len(s1), len(s2))
    # Scan forward for the shared prefix.
    prefix_chars = []
    i = 0
    while i < limit and s1[i] == s2[i]:
        prefix_chars.append(s1[i])
        i += 1
    # Scan backward for the shared suffix.
    suffix_chars = []
    j = 1
    while j <= limit and s1[-j] == s2[-j]:
        suffix_chars.append(s1[-j])
        j += 1
    suffix_chars.reverse()
    return (''.join(prefix_chars), ''.join(suffix_chars))
def is_rotated(seq1, seq2):
    """Return True if seq1 is a rotation of seq2.

    >>> is_rotated(['A', 'B', 'C', 'D'], ['C', 'D', 'A', 'B'])
    True
    >>> is_rotated(['A', 'B', 'C', 'D'], ['C', 'D', 'B', 'A'])
    False
    """
    # Rotations must have equal length.
    if len(seq1) != len(seq2):
        return False
    # Doubling seq1 makes every rotation of it appear as a slice.
    doubled = seq1 + seq1
    head_item = seq2[0]
    for offset, item in enumerate(seq1):
        # Only offsets where seq2's head occurs can start a match.
        if item == head_item and doubled[offset:offset + len(seq1)] == seq2:
            return True
    return False
def getmodule(obj):
    """Return the module that contains the definition of obj.

    Note: prefer inspect.getmodule for new code.

    Arguments:
    obj -- python object, generally a class or a function
           (a class must define at least one function)

    Raises ValueError if no function can be found on the object.

    Fix: the original read the Python 2-only `func_globals` attribute;
    this version also supports Python 3's `__globals__`.

    This approach is slightly hackish; see GvR's post:
    http://groups.google.com.au/group/comp.lang.python/browse_thread/thread/966a7bdee07e3b34/c3cab3f41ea84236
    """
    def _globals_of(f):
        # Function globals: __globals__ on Py3, func_globals on Py2.
        return getattr(f, '__globals__', None) or getattr(f, 'func_globals', None)

    if _globals_of(obj) is not None:
        func = obj
    else:
        # Handle classes: use the globals of any function attribute.
        func = None
        for item in obj.__dict__.values():
            if _globals_of(item) is not None:
                func = item
                break
    if func is None:
        raise ValueError("No functions attached to object: %r" % obj)
    module_name = _globals_of(func)['__name__']
    return sys.modules[module_name]
def round_grid(value, grid, mode=0):
    """Round the given value to a multiple of the given grid size.

    Arguments:
    value -- value to be rounded
    grid -- the result is a multiple of this
    mode -- 0 nearest, 1 up, -1 down

    Fixes: the original raised UnboundLocalError when mode was 1 or -1
    and value was already on the grid; it also truncated toward zero
    for negative values on Python 3 (floor division restores the
    original Python 2 behaviour).

    >>> round_grid(7.5, 5)
    10
    >>> round_grid(7.5, 5, mode=-1)
    5
    >>> round_grid(7.3, 5, mode=1)
    10
    >>> round_grid(7.3, 5.0, mode=1)
    10.0
    """
    off_grid = value % grid
    if mode == 0:
        # Round to nearest; exact halves round up.
        add_one = int(off_grid >= (grid / 2.0))
    elif mode == 1:
        # Round up only when not already on the grid.
        add_one = int(bool(off_grid))
    else:
        # mode == -1: round down.
        add_one = 0
    return (int(value // grid) + add_one) * grid
def get_args(argv):
    """Store command-line args in a dictionary.

    '-' and '--' prefixes are removed; options use --option=value.
    Items without a '-'/'--' prefix are collected in a list under 'args'.

    Consider using optparse or getopt (standard library) instead.
    """
    d = {}
    positional = []
    for token in argv:
        if not token.startswith('-'):
            positional.append(token)
            continue
        # Strip leading dashes, then split on '='.
        pieces = re.sub(r'^-+', '', token).split('=')
        if len(pieces) == 2:
            d[pieces[0]] = pieces[1]
        else:
            # Flag with no value (or malformed multi-'=' option).
            d[pieces[0]] = None
    d['args'] = positional
    return d
if __name__ == '__main__':
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod(sys.modules['__main__'])
| gpl-2.0 |
baloan/qsocket | test/test_qsocket.py | 1 | 6703 | #!/usr/bin/python3.5
# encoding: utf8
"""qsocket test suite"""
import threading
import multiprocessing as mp
from time import sleep
import unittest
import time
from qsocket import QSocket, Listener, create_qsocket
class Echo(QSocket):
    """QSocket that echoes every received object back to the sender."""
    def __init__(self, socket):
        QSocket.__init__(self, socket)
    def process(self, obj):
        # Per-object callback (presumably invoked by QSocket's receiver
        # thread -- TODO confirm); echo the object straight back.
        self.send(obj)
class EchoServer(unittest.TestCase):
    """Round-trip test: client 'alice' talks to an Echo server on port 8020."""
    def setUp(self):
        # Fixed port; the test will fail if 8020 is already in use.
        port = 8020
        laddr = ("", port)
        raddr = ("127.0.0.1", port)
        # Server
        self.listener = Listener(laddr, Echo)
        self.listener.start()
        # Client
        self.alice = create_qsocket(raddr)
        # self.bob open connection
        self.bob = self.listener.accept()
        # Diagnostic output to correlate threads across test runs.
        print("[{}] [{}] runs {} ----------".format(
            threading.current_thread().name, threading.current_thread().ident, self.id()))
        print("self.listener {}".format(self.listener))
        print("self.alice {}".format(self.alice.receiver_t))
        print("self.bob {}".format(self.bob.receiver_t))
    def test_string(self):
        # The Echo handler should return the sent string unchanged.
        req = "Hello echo!"
        self.alice.send(req)
        resp = self.alice.recv()
        self.assertEqual(req, resp)
    def tearDown(self):
        # Close both endpoints and the listener after every test.
        print("[{}] [{}] tearing down".format(
            threading.current_thread().name, threading.current_thread().ident))
        self.alice.close()
        self.bob.close()
        self.listener.close()
class SocketClose(unittest.TestCase):
    """Tests for QSocket close semantics (local and remote) on port 8021."""
    def setUp(self):
        # Fixed port; the test will fail if 8021 is already in use.
        port = 8021
        laddr = ("", port)
        raddr = ("127.0.0.1", port)
        # Server
        self.listener = Listener(laddr)
        self.listener.start()
        # Client
        self.alice = create_qsocket(raddr)
        # self.bob open connection
        self.bob = self.listener.accept()
        # Diagnostic output to correlate threads across test runs.
        print("[{}] [{}] runs {} ----------".format(
            threading.current_thread().name, threading.current_thread().ident, self.id()))
        print("self.listener {}".format(self.listener))
        print("self.alice {}".format(self.alice.receiver_t))
        print("self.bob {}".format(self.bob.receiver_t))
    def tearDown(self):
        print("[{}] [{}] closing all sockets".format(
            threading.current_thread().name, threading.current_thread().ident))
        self.alice.close()
        self.bob.close()
        self.listener.close()
    def test_ping_pong(self):
        """Objects sent in both directions arrive unchanged."""
        # alice sends an update
        req = {"action": "subscribe", "name": "foo", }
        self.alice.send(req)
        resp = self.bob.recv()
        self.assertEqual(req, resp)
        # bob sends an update
        req = {"action": "update", "name": "foo", "value": 120.2}
        self.bob.send(req)
        resp = self.alice.recv()
        self.assertEqual(req, resp)
    def test_remote_close(self):
        """Sending after the peer closed raises OSError."""
        # alice sends an update
        req = {"action": "create", "name": "foo", }
        self.alice.send(req)
        resp = self.bob.recv()
        self.assertEqual(req, resp)
        self.bob.close(wait=True)
        # wait for remote socket close (needs select timeout)
        sleep(QSocket.SELECT_TIMEOUT)
        req = {"action": "update", "name": "foo", "value": 120.2}
        self.assertRaises(OSError, self.alice.send, req)
    def test_remote_close2(self):
        """recv() returns None on both ends after the peer closed."""
        # alice sends an update
        req = {"action": "create", "name": "foo", }
        self.alice.send(req)
        resp = self.bob.recv()
        self.assertEqual(req, resp)
        self.bob.close(wait=True)
        # local inq None: socket closed
        resp = self.bob.recv()
        self.assertEqual(resp, None)
        # inq None: socket closed
        resp = self.alice.recv()
        self.assertEqual(resp, None)
    def test_local_close(self):
        """Mirror of test_remote_close with the closing side swapped."""
        # alice sends an update
        req = {"action": "create", "name": "foo", }
        self.alice.send(req)
        resp = self.bob.recv()
        self.assertEqual(req, resp)
        self.alice.close(wait=True)
        # wait for remote socket close (needs select timeout)
        sleep(QSocket.SELECT_TIMEOUT)
        req = {"action": "update", "name": "foo", "value": 120.2}
        self.assertRaises(OSError, self.bob.send, req)
    def test_local_close2(self):
        """Mirror of test_remote_close2 with the closing side swapped."""
        # alice sends an update
        req = {"action": "create", "name": "foo", }
        self.alice.send(req)
        resp = self.bob.recv()
        self.assertEqual(req, resp)
        self.alice.close(wait=True)
        # local inq None: socket closed
        resp = self.alice.recv()
        self.assertEqual(resp, None)
        # remote inq None: socket closed
        resp = self.bob.recv()
        self.assertEqual(resp, None)
def bob(laddr, socket_class):
    """Child-process server: accept one connection, then close after the
    first recv() returns (presumably when the peer disconnects -- TODO
    confirm recv() semantics against QSocket)."""
    print("{}:{} [{}] runs ----------".format(mp.current_process().pid,
                                              threading.current_thread().ident,
                                              threading.current_thread().name))
    listener = Listener(laddr, socket_class)
    listener.start()
    qsocket = listener.accept()
    print("{} connection accepted".format(mp.current_process()))
    print("self.listener {}".format(listener))
    print("self.qsocket {}".format(qsocket.receiver_t))
    # wait for close
    _ = qsocket.recv()
    print("{} closing socket".format(mp.current_process()))
    listener.close()
class NonFunctional(unittest.TestCase):
    """Throughput check: echo increasingly large payloads through a
    server running in a separate process (port 8022)."""
    def setUp(self):
        # Fixed port; the test will fail if 8022 is already in use.
        port = 8022
        laddr = ("", port)
        raddr = ("127.0.0.1", port)
        # Server
        proc = mp.Process(target=bob, args=(laddr, Echo,))
        proc.start()
        # Client
        self.alice = create_qsocket(raddr)
        # Diagnostic output to correlate processes/threads across runs.
        print("self.alice connected from {} to {}".format(
            self.alice.sock.getsockname(), self.alice.sock.getpeername()))
        print("[{}] [{}] runs {} ----------".format(
            threading.current_thread().name, threading.current_thread().ident, self.id()))
        print("self.alice {}".format(self.alice.receiver_t))
    def tearDown(self):
        # Closing alice lets the child process's recv() return, so it
        # shuts down on its own (see bob()).
        print("[{}] [{}] closing all sockets".format(
            threading.current_thread().name, threading.current_thread().ident))
        self.alice.close()
    def test_size_ladder(self):
        """Echo payloads from 100 B to 10 MB and time each round trip."""
        # Give the child-process server time to start listening.
        sleep(1)
        sizes = (100, 1000, 10000, 100000, 1000000, 10000000,)
        for s in sizes:
            send_buffer = b'i' * s
            t0 = time.perf_counter()
            self.alice.send(send_buffer)
            recv_buffer = self.alice.recv()
            self.assertEqual(send_buffer, recv_buffer)
            t1 = time.perf_counter()
            td = t1 - t0
            print(
                "echo ping-pong of {:10} bytes took {:4.4f}s, {:9.0f}kB/s".format(len(send_buffer), td, s / 1000 / td))
| mit |
kbrebanov/ansible | lib/ansible/modules/cloud/google/gce.py | 7 | 28171 | #!/usr/bin/python
# Copyright 2013 Google Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gce
version_added: "1.4"
short_description: create or terminate GCE instances
description:
- Creates or terminates Google Compute Engine (GCE) instances. See
U(https://cloud.google.com/compute) for an overview.
Full install/configuration instructions for the gce* modules can
be found in the comments of ansible/test/gce_tests.py.
options:
image:
description:
- image string to use for the instance (default will follow latest
stable debian image)
required: false
default: "debian-8"
image_family:
description:
- image family from which to select the image. The most recent
non-deprecated image in the family will be used.
required: false
default: null
version_added: "2.4"
external_projects:
description:
- A list of other projects (accessible with the provisioning credentials)
to be searched for the image.
required: false
default: null
version_added: "2.4"
instance_names:
description:
- a comma-separated list of instance names to create or destroy
required: false
default: null
machine_type:
description:
- machine type to use for the instance, use 'n1-standard-1' by default
required: false
default: "n1-standard-1"
metadata:
description:
- a hash/dictionary of custom data for the instance;
'{"key":"value", ...}'
required: false
default: null
service_account_email:
version_added: "1.5.1"
description:
- service account email
required: false
default: null
service_account_permissions:
version_added: "2.0"
description:
- service account permissions (see
U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create),
--scopes section for detailed information)
required: false
default: null
choices: [
"bigquery", "cloud-platform", "compute-ro", "compute-rw",
"useraccounts-ro", "useraccounts-rw", "datastore", "logging-write",
"monitoring", "sql-admin", "storage-full", "storage-ro",
"storage-rw", "taskqueue", "userinfo-email"
]
pem_file:
version_added: "1.5.1"
description:
- path to the pem file associated with the service account email
This option is deprecated. Use 'credentials_file'.
required: false
default: null
credentials_file:
version_added: "2.1.0"
description:
- path to the JSON file associated with the service account email
default: null
required: false
project_id:
version_added: "1.5.1"
description:
- your GCE project ID
required: false
default: null
name:
description:
- either a name of a single instance or when used with 'num_instances',
the base name of a cluster of nodes
required: false
aliases: ['base_name']
num_instances:
description:
- can be used with 'name', specifies
the number of nodes to provision using 'name'
as a base name
required: false
version_added: "2.3"
network:
description:
- name of the network, 'default' will be used if not specified
required: false
default: "default"
subnetwork:
description:
- name of the subnetwork in which the instance should be created
required: false
default: null
version_added: "2.2"
persistent_boot_disk:
description:
- if set, create the instance with a persistent boot disk
required: false
default: "false"
disks:
description:
- a list of persistent disks to attach to the instance; a string value
gives the name of the disk; alternatively, a dictionary value can
define 'name' and 'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry
will be the boot disk (which must be READ_WRITE).
required: false
default: null
version_added: "1.7"
state:
description:
- desired state of the resource
required: false
default: "present"
choices: ["active", "present", "absent", "deleted", "started", "stopped", "terminated"]
tags:
description:
- a comma-separated list of tags to associate with the instance
required: false
default: null
zone:
description:
- the GCE zone to use. The list of available zones is at U(https://cloud.google.com/compute/docs/regions-zones/regions-zones#available).
required: true
default: "us-central1-a"
ip_forward:
version_added: "1.9"
description:
- set to true if the instance can forward ip packets (useful for
gateways)
required: false
default: "false"
external_ip:
version_added: "1.9"
description:
- type of external ip, ephemeral by default; alternatively, a fixed gce ip or ip name can be given. Specify 'none' if no external ip is desired.
required: false
default: "ephemeral"
disk_auto_delete:
version_added: "1.9"
description:
- if set boot disk will be removed after instance destruction
required: false
default: "true"
preemptible:
version_added: "2.1"
description:
- if set to true, instances will be preemptible and time-limited.
(requires libcloud >= 0.20.0)
required: false
default: "false"
disk_size:
description:
- The size of the boot disk created for this instance (in GB)
required: false
default: 10
version_added: "2.3"
requirements:
- "python >= 2.6"
- "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials,
>= 0.20.0 if using preemptible option"
notes:
- Either I(instance_names) or I(name) is required.
- JSON credentials strongly preferred.
author: "Eric Johnson (@erjohnso) <erjohnso@google.com>, Tom Melendez (@supertom) <supertom@google.com>"
'''
EXAMPLES = '''
# Basic provisioning example. Create a single Debian 8 instance in the
# us-central1-a Zone of the n1-standard-1 machine type.
# Create multiple instances by specifying multiple names, separated by
# commas in the instance_names field
# (e.g. my-test-instance1,my-test-instance2)
gce:
instance_names: my-test-instance1
zone: us-central1-a
machine_type: n1-standard-1
image: debian-8
state: present
service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
credentials_file: "/path/to/your-key.json"
project_id: "your-project-name"
disk_size: 32
# Create a single instance of an image from the "my-base-image" image family
# in the us-central1-a Zone of the n1-standard-1 machine type.
# This image family is in the "my-other-project" GCP project.
gce:
instance_names: my-test-instance1
zone: us-central1-a
machine_type: n1-standard-1
image_family: my-base-image
external_projects:
- my-other-project
state: present
service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
credentials_file: "/path/to/your-key.json"
project_id: "your-project-name"
disk_size: 32
# Create a single Debian 8 instance in the us-central1-a Zone
# Use existing disks, custom network/subnetwork, set service account permissions
# add tags and metadata.
gce:
instance_names: my-test-instance
zone: us-central1-a
machine_type: n1-standard-1
state: present
metadata: '{"db":"postgres", "group":"qa", "id":500}'
tags:
- http-server
- my-other-tag
disks:
- name: disk-2
mode: READ_WRITE
- name: disk-3
mode: READ_ONLY
disk_auto_delete: false
network: foobar-network
subnetwork: foobar-subnetwork-1
preemptible: true
ip_forward: true
service_account_permissions:
- storage-full
- taskqueue
- bigquery
- https://www.googleapis.com/auth/ndev.clouddns.readwrite
service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
credentials_file: "/path/to/your-key.json"
project_id: "your-project-name"
---
# Example Playbook
- name: Compute Engine Instance Examples
hosts: localhost
vars:
service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
credentials_file: "/path/to/your-key.json"
project_id: "your-project-name"
tasks:
- name: create multiple instances
# Basic provisioning example. Create multiple Debian 8 instances in the
# us-central1-a Zone of n1-standard-1 machine type.
gce:
instance_names: test1,test2,test3
zone: us-central1-a
machine_type: n1-standard-1
image: debian-8
state: present
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
metadata : '{ "startup-script" : "apt-get update" }'
register: gce
- name: Save host data
add_host:
hostname: "{{ item.public_ip }}"
groupname: gce_instances_ips
with_items: "{{ gce.instance_data }}"
- name: Wait for SSH for instances
wait_for:
delay: 1
host: "{{ item.public_ip }}"
port: 22
state: started
timeout: 30
with_items: "{{ gce.instance_data }}"
- name: Configure Hosts
hosts: gce_instances_ips
become: yes
become_method: sudo
roles:
- my-role-one
- my-role-two
tags:
- config
- name: delete test-instances
# Basic termination of instance.
gce:
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
instance_names: "{{ gce.instance_names }}"
zone: us-central1-a
state: absent
tags:
- delete
'''
import socket
import logging
try:
from ast import literal_eval
HAS_PYTHON26 = True
except ImportError:
HAS_PYTHON26 = False
try:
import libcloud
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceInUseError, ResourceNotFoundError
from libcloud.compute.drivers.gce import GCEAddress
_ = Provider.GCE
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.gce import gce_connect, unexpected_error_msg
from ansible.module_utils.gcp import get_valid_location
from ansible.module_utils.six.moves import reduce
def get_instance_info(inst):
    """Flatten a libcloud GCE node into a plain dictionary.

    inst: a libcloud Node-like object (attributes used: extra, image,
          size, name, public_ips, private_ips).

    Returns a dict with keys: image, disks, machine_type, metadata, name,
    network, subnetwork, private_ip, public_ip, status, tags, zone.
    Optional pieces that are absent come back as None / empty containers.
    """
    metadata = {}
    if 'metadata' in inst.extra and 'items' in inst.extra['metadata']:
        for md in inst.extra['metadata']['items']:
            metadata[md['key']] = md['value']

    # Fix: the original used bare `except:` clauses, which also swallow
    # KeyboardInterrupt/SystemExit. Only a missing or malformed network
    # interface entry should be treated as "no network / no subnetwork".
    try:
        netname = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
    except (KeyError, IndexError, AttributeError, TypeError):
        netname = None
    try:
        subnetname = inst.extra['networkInterfaces'][0]['subnetwork'].split('/')[-1]
    except (KeyError, IndexError, AttributeError, TypeError):
        subnetname = None

    if 'disks' in inst.extra:
        # Report disk names in attachment order (sorted by 'index').
        disk_names = [disk_info['source'].split('/')[-1]
                      for disk_info
                      in sorted(inst.extra['disks'],
                                key=lambda disk_info: disk_info['index'])]
    else:
        disk_names = []

    if len(inst.public_ips) == 0:
        public_ip = None
    else:
        public_ip = inst.public_ips[0]

    # NOTE(review): assumes at least one private IP exists — TODO confirm
    # this always holds for GCE nodes returned by libcloud.
    return ({
        'image': inst.image is not None and inst.image.split('/')[-1] or None,
        'disks': disk_names,
        'machine_type': inst.size,
        'metadata': metadata,
        'name': inst.name,
        'network': netname,
        'subnetwork': subnetname,
        'private_ip': inst.private_ips[0],
        'public_ip': public_ip,
        'status': ('status' in inst.extra) and inst.extra['status'] or None,
        'tags': ('tags' in inst.extra) and inst.extra['tags'] or [],
        'zone': ('zone' in inst.extra) and inst.extra['zone'].name or None,
    })
def create_instances(module, gce, instance_names, number, lc_zone):
    """Creates new instances. Attributes other than instance_names are picked
    up from 'module'

    module : AnsibleModule object
    gce: authenticated GCE libcloud driver
    instance_names: python list of instance names to create
    number: number of instances to create
    lc_zone: GCEZone object

    Returns:
        A (changed, instance_json_data, instance_names) tuple, where
        instance_json_data is a list of dictionaries with instance
        information about the instances that were launched.
    """
    image = module.params.get('image')
    image_family = module.params.get('image_family')
    external_projects = module.params.get('external_projects')
    machine_type = module.params.get('machine_type')
    metadata = module.params.get('metadata')
    network = module.params.get('network')
    subnetwork = module.params.get('subnetwork')
    persistent_boot_disk = module.params.get('persistent_boot_disk')
    disks = module.params.get('disks')
    tags = module.params.get('tags')
    ip_forward = module.params.get('ip_forward')
    external_ip = module.params.get('external_ip')
    disk_auto_delete = module.params.get('disk_auto_delete')
    preemptible = module.params.get('preemptible')
    disk_size = module.params.get('disk_size')
    service_account_permissions = module.params.get('service_account_permissions')

    # Resolve the requested external IP: None (no external IP), a literal
    # address/name (wrapped as a GCEAddress or looked up), or the string
    # "ephemeral" passed straight through to libcloud.
    if external_ip == "none":
        instance_external_ip = None
    elif external_ip != "ephemeral":
        instance_external_ip = external_ip
        try:
            # check if instance_external_ip is an ip or a name
            try:
                socket.inet_aton(instance_external_ip)
                instance_external_ip = GCEAddress(id='unknown', name='unknown', address=instance_external_ip, region='unknown', driver=gce)
            except socket.error:
                instance_external_ip = gce.ex_get_address(instance_external_ip)
        except GoogleBaseError as e:
            module.fail_json(msg='Unexpected error attempting to get a static ip %s, error: %s' % (external_ip, e.value))
    else:
        instance_external_ip = external_ip

    new_instances = []
    changed = False

    # Resolve the requested disks into libcloud volume objects; dict
    # entries carry an explicit mode, plain names default to READ_ONLY
    # except for the first (boot) disk.
    lc_disks = []
    disk_modes = []
    for i, disk in enumerate(disks or []):
        if isinstance(disk, dict):
            lc_disks.append(gce.ex_get_volume(disk['name'], lc_zone))
            disk_modes.append(disk['mode'])
        else:
            lc_disks.append(gce.ex_get_volume(disk, lc_zone))
            # boot disk is implicitly READ_WRITE
            disk_modes.append('READ_ONLY' if i > 0 else 'READ_WRITE')
    lc_network = gce.ex_get_network(network)
    lc_machine_type = gce.ex_get_size(machine_type, lc_zone)

    # Try to convert the user's metadata value into the format expected
    # by GCE. First try to ensure user has proper quoting of a
    # dictionary-like syntax using 'literal_eval', then convert the python
    # dict into a python list of 'key' / 'value' dicts. Should end up
    # with:
    # [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...]
    if metadata:
        if isinstance(metadata, dict):
            md = metadata
        else:
            try:
                md = literal_eval(str(metadata))
                if not isinstance(md, dict):
                    raise ValueError('metadata must be a dict')
            except ValueError as e:
                module.fail_json(msg='bad metadata: %s' % str(e))
            except SyntaxError as e:
                module.fail_json(msg='bad metadata syntax')

        # Older libcloud releases expect the explicit items-list format.
        if hasattr(libcloud, '__version__') and libcloud.__version__ < '0.15':
            items = []
            for k, v in md.items():
                items.append({"key": k, "value": v})
            metadata = {'items': items}
        else:
            metadata = md

    # Image lookup is expensive, so it is deferred until actually needed.
    lc_image = LazyDiskImage(module, gce, image, lc_disks, family=image_family, projects=external_projects)
    ex_sa_perms = []
    bad_perms = []
    if service_account_permissions:
        for perm in service_account_permissions:
            # Permissions must be known scope aliases or full auth URLs.
            if perm not in gce.SA_SCOPES_MAP and not perm.startswith('https://www.googleapis.com/auth'):
                bad_perms.append(perm)
        if len(bad_perms) > 0:
            module.fail_json(msg='bad permissions: %s' % str(bad_perms))
        ex_sa_perms.append({'email': "default"})
        ex_sa_perms[0]['scopes'] = service_account_permissions

    # These variables all have default values but check just in case
    if not lc_network or not lc_machine_type or not lc_zone:
        module.fail_json(msg='Missing required create instance variable',
                         changed=False)

    gce_args = dict(
        location=lc_zone,
        ex_network=network, ex_tags=tags, ex_metadata=metadata,
        ex_can_ip_forward=ip_forward,
        external_ip=instance_external_ip, ex_disk_auto_delete=disk_auto_delete,
        ex_service_accounts=ex_sa_perms
    )
    if preemptible is not None:
        gce_args['ex_preemptible'] = preemptible
    if subnetwork is not None:
        gce_args['ex_subnetwork'] = subnetwork

    if isinstance(instance_names, str) and not number:
        instance_names = [instance_names]

    # Two creation paths: a base name + count uses the bulk API; otherwise
    # each named instance is fetched-or-created individually.
    if isinstance(instance_names, str) and number:
        instance_responses = gce.ex_create_multiple_nodes(instance_names, lc_machine_type,
                                                          lc_image(), number, **gce_args)
        for resp in instance_responses:
            n = resp
            if isinstance(resp, libcloud.compute.drivers.gce.GCEFailedNode):
                # A failed node may still exist (e.g. name collision);
                # try to resolve it to the live node.
                try:
                    n = gce.ex_get_node(n.name, lc_zone)
                except ResourceNotFoundError:
                    pass
            else:
                # Assure that at least one node has been created to set changed=True
                changed = True
            new_instances.append(n)
    else:
        for instance in instance_names:
            pd = None
            if lc_disks:
                pd = lc_disks[0]
            elif persistent_boot_disk:
                # Reuse an existing volume named after the instance, or
                # create one from the image.
                try:
                    pd = gce.ex_get_volume("%s" % instance, lc_zone)
                except ResourceNotFoundError:
                    pd = gce.create_volume(disk_size, "%s" % instance, image=lc_image())
            gce_args['ex_boot_disk'] = pd

            inst = None
            try:
                # Idempotence: an already-existing node is reused as-is.
                inst = gce.ex_get_node(instance, lc_zone)
            except ResourceNotFoundError:
                inst = gce.create_node(
                    instance, lc_machine_type, lc_image(), **gce_args
                )
                changed = True
            except GoogleBaseError as e:
                module.fail_json(msg='Unexpected error attempting to create ' +
                                 'instance %s, error: %s' % (instance, e.value))
            if inst:
                new_instances.append(inst)

    # Attach any requested extra disks, verifying that disks already
    # attached at the same index match the request.
    for inst in new_instances:
        for i, lc_disk in enumerate(lc_disks):
            # Check whether the disk is already attached
            if (len(inst.extra['disks']) > i):
                attached_disk = inst.extra['disks'][i]
                if attached_disk['source'] != lc_disk.extra['selfLink']:
                    module.fail_json(
                        msg=("Disk at index %d does not match: requested=%s found=%s" % (
                            i, lc_disk.extra['selfLink'], attached_disk['source'])))
                elif attached_disk['mode'] != disk_modes[i]:
                    module.fail_json(
                        msg=("Disk at index %d is in the wrong mode: requested=%s found=%s" % (
                            i, disk_modes[i], attached_disk['mode'])))
                else:
                    continue
            gce.attach_volume(inst, lc_disk, ex_mode=disk_modes[i])
            # Work around libcloud bug: attached volumes don't get added
            # to the instance metadata. get_instance_info() only cares about
            # source and index.
            if len(inst.extra['disks']) != i + 1:
                inst.extra['disks'].append(
                    {'source': lc_disk.extra['selfLink'], 'index': i})

    instance_names = []
    instance_json_data = []
    for inst in new_instances:
        d = get_instance_info(inst)
        instance_names.append(d['name'])
        instance_json_data.append(d)

    return (changed, instance_json_data, instance_names)
def change_instance_state(module, gce, instance_names, number, zone, state):
    """Changes the state of a list of instances. For example,
    change from started to stopped, or started to absent.

    module: Ansible module object
    gce: authenticated GCE connection object
    instance_names: a list of instance names to terminate, or a base name
        string when 'number' is given
    number: number of numbered instances derived from a base name
    zone: GCEZone object where the instances reside prior to termination
    state: 'state' parameter passed into module as argument

    Returns a (changed, state_instance_names) tuple; state_instance_names
    is the list of instance names the request applied to.
    """
    changed = False
    nodes = []
    state_instance_names = []

    # Expand a base name + count into 'name-000', 'name-001', ...;
    # otherwise treat the argument as one name or a ready-made list.
    if isinstance(instance_names, str) and number:
        node_names = ['%s-%03d' % (instance_names, i) for i in range(number)]
    elif isinstance(instance_names, str) and not number:
        node_names = [instance_names]
    else:
        node_names = instance_names

    for name in node_names:
        inst = None
        try:
            inst = gce.ex_get_node(name, zone)
        except ResourceNotFoundError:
            # Nonexistent nodes are already "changed" for absent/deleted;
            # still report the name back to the caller.
            state_instance_names.append(name)
        except Exception as e:
            module.fail_json(msg=unexpected_error_msg(e), changed=False)
        else:
            nodes.append(inst)
            state_instance_names.append(name)

    if state in ['absent', 'deleted'] and number:
        # Bulk destroy returns a list of per-node booleans; changed if any.
        changed_nodes = gce.ex_destroy_multiple_nodes(nodes) or [False]
        changed = reduce(lambda x, y: x or y, changed_nodes)
    else:
        for node in nodes:
            if state in ['absent', 'deleted']:
                gce.destroy_node(node)
                changed = True
            elif state == 'started' and node.state == libcloud.compute.types.NodeState.STOPPED:
                gce.ex_start_node(node)
                changed = True
            elif state in ['stopped', 'terminated'] and node.state == libcloud.compute.types.NodeState.RUNNING:
                gce.ex_stop_node(node)
                changed = True

    return (changed, state_instance_names)
def main():
    """Module entry point: parse arguments, then create or change
    instances according to 'state' and exit with a JSON result."""
    module = AnsibleModule(
        argument_spec=dict(
            image=dict(default='debian-8'),
            image_family=dict(),
            external_projects=dict(type='list'),
            instance_names=dict(),
            machine_type=dict(default='n1-standard-1'),
            metadata=dict(),
            name=dict(aliases=['base_name']),
            num_instances=dict(type='int'),
            network=dict(default='default'),
            subnetwork=dict(),
            persistent_boot_disk=dict(type='bool', default=False),
            disks=dict(type='list'),
            state=dict(choices=['active', 'present', 'absent', 'deleted',
                                'started', 'stopped', 'terminated'],
                       default='present'),
            tags=dict(type='list'),
            zone=dict(default='us-central1-a'),
            service_account_email=dict(),
            service_account_permissions=dict(type='list'),
            pem_file=dict(type='path'),
            credentials_file=dict(type='path'),
            project_id=dict(),
            ip_forward=dict(type='bool', default=False),
            external_ip=dict(default='ephemeral'),
            disk_auto_delete=dict(type='bool', default=True),
            disk_size=dict(type='int', default=10),
            preemptible=dict(type='bool', default=None),
        ),
        mutually_exclusive=[('instance_names', 'name')]
    )

    # Hard dependency checks before touching any cloud API.
    if not HAS_PYTHON26:
        module.fail_json(msg="GCE module requires python's 'ast' module, python v2.6+")
    if not HAS_LIBCLOUD:
        module.fail_json(msg='libcloud with GCE support (0.17.0+) required for this module')

    gce = gce_connect(module)

    image = module.params.get('image')
    image_family = module.params.get('image_family')
    external_projects = module.params.get('external_projects')
    instance_names = module.params.get('instance_names')
    name = module.params.get('name')
    number = module.params.get('num_instances')
    subnetwork = module.params.get('subnetwork')
    state = module.params.get('state')
    zone = module.params.get('zone')
    preemptible = module.params.get('preemptible')
    changed = False

    # Normalize the target names: 'instance_names' may be a list or a
    # comma-separated string; 'name' (a base name) takes precedence.
    inames = None
    if isinstance(instance_names, list):
        inames = instance_names
    elif isinstance(instance_names, str):
        inames = instance_names.split(',')
    if name:
        inames = name
    if not inames:
        module.fail_json(msg='Must specify a "name" or "instance_names"',
                         changed=False)
    if not zone:
        module.fail_json(msg='Must specify a "zone"', changed=False)

    lc_zone = get_valid_location(module, gce, zone)

    # Feature gates tied to libcloud capabilities/version.
    if preemptible is not None and hasattr(libcloud, '__version__') and libcloud.__version__ < '0.20':
        module.fail_json(msg="Apache Libcloud 0.20.0+ is required to use 'preemptible' option",
                         changed=False)

    if subnetwork is not None and not hasattr(gce, 'ex_get_subnetwork'):
        module.fail_json(msg="Apache Libcloud 1.0.0+ is required to use 'subnetwork' option",
                         changed=False)

    json_output = {'zone': zone}
    if state in ['absent', 'deleted', 'started', 'stopped', 'terminated']:
        json_output['state'] = state
        (changed, state_instance_names) = change_instance_state(
            module, gce, inames, number, lc_zone, state)

        # based on what user specified, return the same variable, although
        # value could be different if an instance could not be destroyed
        # NOTE(review): this parses as `instance_names or (name and number)`;
        # presumably the intent — confirm against callers before changing.
        if instance_names or name and number:
            json_output['instance_names'] = state_instance_names
        elif name:
            json_output['name'] = name

    elif state in ['active', 'present']:
        json_output['state'] = 'present'
        (changed, instance_data, instance_name_list) = create_instances(
            module, gce, inames, number, lc_zone)
        json_output['instance_data'] = instance_data
        if instance_names:
            json_output['instance_names'] = instance_name_list
        elif name:
            json_output['name'] = name

    json_output['changed'] = changed
    module.exit_json(**json_output)
class LazyDiskImage:
    """
    Deferred disk-image lookup.

    gce.ex_get_image is a very expensive remote call, so the lookup is
    postponed until the image is actually needed, and performed at most
    once; subsequent calls return the cached result.
    """

    def __init__(self, module, gce, name, has_pd, family=None, projects=None):
        self.module = module
        self.gce = gce
        self.name = name
        self.has_pd = has_pd
        self.family = family
        self.projects = projects
        self.image = None
        self.was_called = False

    def __call__(self):
        if self.was_called:
            return self.image
        self.was_called = True
        if not self.has_pd:
            if self.family:
                self.image = self.gce.ex_get_image_from_family(
                    self.family, ex_project_list=self.projects)
            else:
                self.image = self.gce.ex_get_image(
                    self.name, ex_project_list=self.projects)
            if not self.image:
                self.module.fail_json(
                    msg='image or disks missing for create instance', changed=False)
        return self.image
if __name__ == '__main__':
main()
| gpl-3.0 |
mitocw/edx-platform | common/djangoapps/util/organizations_helpers.py | 4 | 3343 | """
Utility library for working with the edx-organizations app
"""
from django.conf import settings
from django.db.utils import DatabaseError
def add_organization(organization_data):
    """Client API adapter: create an organization (no-op when the
    organizations app is disabled, returning None)."""
    if organizations_enabled():
        from organizations import api
        return api.add_organization(organization_data=organization_data)
    return None
def add_organization_course(organization_data, course_id):
    """Client API adapter: link a course to an organization (no-op when
    the organizations app is disabled, returning None)."""
    if organizations_enabled():
        from organizations import api
        return api.add_organization_course(organization_data=organization_data, course_key=course_id)
    return None
def get_organization(organization_id):
    """Client API adapter: fetch a single organization by id
    ([] when the organizations app is disabled)."""
    if organizations_enabled():
        from organizations import api
        return api.get_organization(organization_id)
    return []
def get_organization_by_short_name(organization_short_name):
    """Client API adapter: look up an organization by its short name.

    Returns None when the organizations app is disabled or when no
    organization matches the given short name.
    """
    if not organizations_enabled():
        return None
    from organizations import api
    from organizations.exceptions import InvalidOrganizationException
    try:
        return api.get_organization_by_short_name(organization_short_name)
    except InvalidOrganizationException:
        return None
def get_organizations():
    """Client API adapter: list all organizations ([] when the
    organizations app is disabled)."""
    if not organizations_enabled():
        return []
    from organizations import api
    # Under edx-platform's test setup, Django admin forms can be
    # instantiated before migrations have created the tables, which makes
    # this query raise DatabaseError. That cannot happen in production,
    # where migrations always run before the application starts, so an
    # empty list is a safe fallback here.
    try:
        return api.get_organizations()
    except DatabaseError:
        return []
def get_organization_courses(organization_id):
    """Client API adapter: list the courses attached to an organization
    ([] when the organizations app is disabled)."""
    if organizations_enabled():
        from organizations import api
        return api.get_organization_courses(organization_id)
    return []
def get_course_organizations(course_id):
    """Client API adapter: list the organizations attached to a course
    ([] when the organizations app is disabled)."""
    if organizations_enabled():
        from organizations import api
        return api.get_course_organizations(course_id)
    return []
def get_course_organization_id(course_id):
    """Return the id of the first organization linked to the course, or
    None if the course is not linked to any organization."""
    orgs = get_course_organizations(course_id)
    return orgs[0]['id'] if orgs else None
def organizations_enabled():
    """True when the ORGANIZATIONS_APP feature switch is turned on."""
    features = settings.FEATURES
    return features.get('ORGANIZATIONS_APP', False)
| agpl-3.0 |
chjw8016/GreenOdoo7-haibao | openerp/addons/hr_timesheet/wizard/hr_timesheet_print_employee.py | 53 | 2739 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
from openerp.osv import fields, osv
from openerp.tools.translate import _
class analytical_timesheet_employee(osv.osv_memory):
    # Transient (osv_memory) wizard backing both the "Print Employee
    # Timesheet" and "Print My Timesheet" menu actions: the user picks a
    # month, year and employee, then launches the report.
    _name = 'hr.analytical.timesheet.employee'
    _description = 'Print Employee Timesheet & Print My Timesheet'
    _columns = {
        # 1-based month selection; stored as an integer key.
        'month': fields.selection([(1,'January'), (2,'February'), (3,'March'), (4,'April'),
                                   (5,'May'), (6,'June'), (7,'July'), (8,'August'), (9,'September'),
                                   (10,'October'), (11,'November'), (12,'December')], 'Month', required=True),
        'year': fields.integer('Year', required=True),
        'employee_id': fields.many2one('hr.employee', 'Employee', required=True)
    }

    def _get_user(self, cr, uid, context=None):
        """Default employee: the hr.employee record linked to the current
        user; raises a user-facing warning when none exists."""
        emp_obj = self.pool.get('hr.employee')
        emp_id = emp_obj.search(cr, uid, [('user_id', '=', uid)], context=context)
        if not emp_id:
            raise osv.except_osv(_("Warning!"), _("Please define employee for this user!"))
        return emp_id and emp_id[0] or False

    _defaults = {
        # Defaults are callables so they are evaluated per wizard instance.
        'month': lambda *a: datetime.date.today().month,
        'year': lambda *a: datetime.date.today().year,
        'employee_id': _get_user
    }

    def print_report(self, cr, uid, ids, context=None):
        """Return the ir.actions.report.xml action that renders the
        'hr.analytical.timesheet' report for the selected employee."""
        data = self.read(cr, uid, ids, context=context)[0]
        # read() returns many2one fields as (id, name) tuples; the report
        # expects the bare id.
        data['employee_id'] = data['employee_id'][0]
        datas = {
            'ids': [],
            'model': 'hr.employee',
            'form': data
        }
        return {
            'type': 'ir.actions.report.xml',
            'report_name': 'hr.analytical.timesheet',
            'datas': datas,
        }

# Old-style OpenERP model registration: instantiating the class registers
# it with the ORM pool.
analytical_timesheet_employee()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| mit |
kapamaroo/MPD-plasma-client | contents/code/settings.py | 1 | 2848 | # -*- coding: utf-8 -*-
#
# This file is part of MPD-plasma-client
# MPD-plasma-client it simply mpd-server client written on python
#
# Copyright (C) 2010 Vladimir Krylov <memnek@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2,
# or (at your option) any later version, as published by the Free
# Software Foundation
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyKDE4.kdecore import *
from PyKDE4.kdeui import *
from PyKDE4.kio import *
from ui_config import Ui_Dialog
class Settings(QWidget, Ui_Dialog):
    """Configuration dialog for the MPD plasmoid.

    Wraps the Designer-generated Ui_Dialog and converts between widget
    states and the plain settings dict used by the applet. Settings keys:
    host (str), port (int), orientation ('h'/'v'), icons ('s' system /
    'u' user), stop (bool), popup (bool), cover (bool).
    """

    def __init__(self, parent, defaultSettings=None):
        """Build the dialog and, when given, preload *defaultSettings*."""
        QWidget.__init__(self)
        self.parent = parent
        self.setupUi(self)
        if defaultSettings:
            self.hostLineEdit.setText(defaultSettings['host'])
            self.portLineEdit.setText(str(defaultSettings['port']))
            # Radio buttons: check exactly one of each pair (checking one
            # unchecks the other in the group), so keep the if/else form.
            if defaultSettings['orientation'] == 'v':
                self.verticalRadioButton.setChecked(True)
            else:
                self.horizontalRadioButton.setChecked(True)
            if defaultSettings['icons'] == 'u':
                self.userIconsRadioButton.setChecked(True)
            else:
                self.systemIconsRadioButton.setChecked(True)
            # Checkboxes take the boolean directly — no if/else needed.
            self.stopCheckBox.setChecked(bool(defaultSettings['stop']))
            self.showPopupDialogCheckBox.setChecked(bool(defaultSettings['popup']))
            self.showCoverImageCheckBox.setChecked(bool(defaultSettings['cover']))

    def getSettings(self):
        """Read the widgets back into a settings dict (see class docstring)."""
        host = str(self.hostLineEdit.text()).strip()
        port = int(str(self.portLineEdit.text()).strip())
        orientation = 'h' if self.horizontalRadioButton.isChecked() else 'v'
        iconTheme = 's' if self.systemIconsRadioButton.isChecked() else 'u'
        # isChecked() already returns a bool, so the original
        # `if checked: x = True else: x = False` chains collapse away.
        return {'host': host,
                'port': port,
                'orientation': orientation,
                'icons': iconTheme,
                'stop': self.stopCheckBox.isChecked(),
                'popup': self.showPopupDialogCheckBox.isChecked(),
                'cover': self.showCoverImageCheckBox.isChecked()}
| gpl-3.0 |
geometalab/osmaxx | tests/conversion/view_test.py | 2 | 4771 | import pytest
from rest_framework.reverse import reverse
from osmaxx.conversion import status
# List endpoints that require an authenticated (or admin) client.
authenticated_access_urls = [
    reverse(view_name)
    for view_name in (
        'clipping_area-list',
        'conversion_job-list',
        'conversion_parametrization-list',
    )
]
@pytest.fixture(params=authenticated_access_urls)
def access_url(request):
    # Parametrized fixture: yields each authenticated-only endpoint URL
    # in turn, so every test using it runs once per endpoint.
    return request.param
def test_access_for_unauthorized_user_denied(client, access_url):
    """Anonymous requests to protected list endpoints get HTTP 403."""
    assert client.get(access_url).status_code == 403
@pytest.mark.django_db()
def test_access_for_authenticated_client_allowed(authenticated_client, access_url):
    """Logged-in users may read each protected list endpoint."""
    assert authenticated_client.get(access_url).status_code == 200
def test_access_for_admin_user_allowed(admin_client, access_url):
    """Admin users may read each protected list endpoint."""
    assert admin_client.get(access_url).status_code == 200
@pytest.mark.django_db()
def test_conversion_parametrization_creation_success(authenticated_api_client, conversion_parametrization_data):
    # Authenticated POST creates a parametrization -> 201 Created.
    response = authenticated_api_client.post(reverse('conversion_parametrization-list'), conversion_parametrization_data, format='json')
    assert response.status_code == 201
@pytest.mark.django_db()
def test_conversion_parametrization_creation_fails(api_client, conversion_parametrization_data):
    # Anonymous POST is rejected -> 403 Forbidden.
    response = api_client.post(reverse('conversion_parametrization-list'), conversion_parametrization_data, format='json')
    assert response.status_code == 403
@pytest.mark.django_db()
def test_conversion_parametrization_detail_access_success(authenticated_api_client, conversion_parametrization, persisted_valid_clipping_area):
    # Detail view serializes id, out_format, out_srs and the clipping area FK.
    response = authenticated_api_client.get(reverse('conversion_parametrization-detail', kwargs={'pk': conversion_parametrization.id}))
    assert response.status_code == 200
    data = response.json()
    assert data['id'] == conversion_parametrization.id
    assert data['out_format'] == conversion_parametrization.out_format
    assert data['out_srs'] == conversion_parametrization.out_srs
    assert data['clipping_area'] == persisted_valid_clipping_area.id
@pytest.mark.django_db()
def test_conversion_parametrization_detail_access_fails(api_client, conversion_parametrization):
    # Anonymous detail access is rejected -> 403 Forbidden.
    response = api_client.get(reverse('conversion_parametrization-detail', kwargs={'pk': conversion_parametrization.id}))
    assert response.status_code == 403
@pytest.mark.django_db()
def test_conversion_job_creation_success(authenticated_api_client, conversion_job_data, mocker):
    # Creating a job must echo the submitted fields and trigger the
    # conversion exactly once (Job.start_conversion is mocked out).
    start_conversion_mock = mocker.patch('osmaxx.conversion.models.Job.start_conversion')
    response = authenticated_api_client.post(reverse('conversion_job-list'), conversion_job_data, format='json')
    data = response.json()
    assert response.status_code == 201
    assert data['callback_url'] == conversion_job_data['callback_url']
    assert data['parametrization'] == conversion_job_data['parametrization']
    assert start_conversion_mock.call_count == 1
@pytest.mark.django_db()
def test_conversion_job_creation_fails(api_client, conversion_job_data):
    # Anonymous job creation is rejected -> 403 Forbidden.
    response = api_client.post(reverse('conversion_job-list'), conversion_job_data, format='json')
    assert response.status_code == 403
@pytest.mark.django_db()
def test_conversion_job_detail_access_success(authenticated_api_client, conversion_job, conversion_parametrization):
    # A freshly created job reports RECEIVED status and no result file yet.
    response = authenticated_api_client.get(reverse('conversion_job-detail', kwargs={'pk': conversion_job.id}))
    assert response.status_code == 200
    data = response.json()
    assert data['id'] == conversion_job.id
    assert data['callback_url'] == conversion_job.callback_url
    assert data['parametrization'] == conversion_parametrization.id
    assert data['status'] == status.RECEIVED
    assert data['resulting_file_path'] is None
@pytest.mark.django_db()
def test_conversion_job_detail_access_fails_with_anonymous_user(api_client, conversion_job):
    # Anonymous detail access is rejected -> 403 Forbidden.
    response = api_client.get(reverse('conversion_job-detail', kwargs={'pk': conversion_job.id}))
    assert response.status_code == 403
@pytest.mark.django_db()
def test_conversion_job_absolute_url_resolves_correct(conversion_job, server_url):
    # get_absolute_url() must match the DRF-reversed detail route.
    url = server_url + reverse('conversion_job-detail', kwargs={'pk': conversion_job.id})
    assert conversion_job.get_absolute_url() == url
@pytest.mark.django_db()
def test_conversion_job_creation_enqueues(authenticated_api_client, conversion_job_data, rq_mock_return, mocker):
    # Job creation must enqueue exactly one RQ conversion task.
    conversion_start_start_format_extraction_mock = mocker.patch('osmaxx.conversion.converters.converter.rq_enqueue_with_settings', return_value=rq_mock_return())
    authenticated_api_client.post(reverse('conversion_job-list'), conversion_job_data, format='json')
    assert conversion_start_start_format_extraction_mock.call_count == 1
| mit |
geome-mitbbs/QTS_Research | Trade_Algo.py | 1 | 7950 | try:
from . import Portfolio
from . import Data_API
from .Quant_Indicators import *
except:
import Portfolio
import Data_API
from Quant_Indicators import *
class Trade_Algo:
    """A back-testable trading strategy defined by a user-supplied Python
    command string.

    The command string is executed verbatim (via ``exec``) once per pricing
    date inside :meth:`back_test`; it is expected to manipulate the local
    variables ``portfolio``..``portfolio3`` and ``quant_index``..
    ``quant_index3`` and to reference tickers as double-quoted string
    literals (e.g. ``"AAPL"``).

    SECURITY NOTE(review): ``exec`` on an arbitrary command string can run
    any Python code; :meth:`safety_check` is currently a stub that always
    returns True, so this class must not be exposed to untrusted input.
    """
    def __init__(self,command=None):
        # Keep the raw command; replace it with a poison command if the
        # (currently stubbed) safety check rejects it.
        self.command = command
        if not self.safety_check():
            self.command = """raise Exception("not safe to run")"""
    def filter_string(self):
        """Return the command with all triple-double-quoted string literals
        removed, so quote scanning in find_tickers()/find_used_vars() only
        sees single ``"``-delimited ticker literals.

        Raises Exception when the number of triple-quote markers is odd.
        """
        # first suppress all the """sss""" string.
        new_command = ""
        left_quotes_saw = 0
        left_quotes_pos = []
        # Scan for runs of three consecutive double quotes; record the index
        # of the first quote of each run.
        for i in range(len(self.command)):
            if(self.command[i] != "\""):
                left_quotes_saw = 0
            else:
                if(left_quotes_saw<3):
                    left_quotes_saw += 1
                if(left_quotes_saw==3):
                    left_quotes_pos.append(i-2)
                    left_quotes_saw = 0
        # Markers must pair up (opening/closing); an odd count is malformed.
        if(len(left_quotes_pos)//2 * 2 != len(left_quotes_pos)):
            raise Exception("Not proper string")
        if(len(left_quotes_pos)==0):
            return self.command
        # Stitch together the segments outside each triple-quoted literal
        # (each marker is 3 characters wide, hence the +3 offsets).
        for i in range(len(left_quotes_pos)//2):
            if i==0:
                new_command += self.command[0:left_quotes_pos[2*i]]
            else:
                new_command += self.command[left_quotes_pos[2*i-1]+3:left_quotes_pos[2*i]]
            if i== len(left_quotes_pos)//2-1:
                new_command += self.command[left_quotes_pos[2*i+1]+3:]
        return new_command
    def find_used_vars(self):
        """Populate self.used_vars with the subset of the well-known
        portfolio/quant_index variable names mentioned in the command
        (values are always None; only the keys matter)."""
        new_command = self.filter_string()
        ret = dict()
        # NOTE(review): 'list' shadows the builtin; left unchanged here.
        list = ['portfolio','portfolio1','portfolio2','portfolio3','quant_index','quant_index1','quant_index2','quant_index3']
        for item in list:
            if item in new_command:
                ret[item] = None
        self.used_vars = ret
    def find_tickers(self):
        """Populate self.tickers with every ``"..."`` literal found in the
        (triple-quote-stripped) command; the poison command yields []."""
        # find all the "ABC" in the command and they should be all the tickers
        if(self.command == """raise Exception("not safe to run")"""):
            self.tickers = []
            return
        new_command = self.filter_string()
        tickers = []
        current_ticker = ""
        saw_left_quote = False
        # Simple state machine: accumulate characters between quote pairs.
        for c in new_command:
            if not saw_left_quote:
                if c != "\"":
                    pass
                else:
                    saw_left_quote = True
            else:
                if c != "\"":
                    current_ticker += c
                else:
                    tickers.append(current_ticker)
                    current_ticker = ""
                    saw_left_quote = False
        self.tickers = tickers
    def safety_check(self):
        """Stub: always approves the command. See the class-level security
        note — this must be implemented before untrusted use."""
        # check if self.command is safe to run....need this before go production
        return True
    def back_test(self,start_date=None,end_date=None,initial_cash=0,initial_portfolio=None):
        """Run the command once per pricing date in [start_date, end_date].

        start_date/end_date may be dates or int offsets (resolved via
        Data_API.add_pricing_date). Results are stored on self:
        portfolio..portfolio3, quant_index..quant_index3, plus the
        aggregated self.pnls and self.quant_indices dicts. The global
        pricing date is restored afterwards, even on error.

        NOTE(review): when initial_portfolio is given, all four portfolio
        locals alias the SAME object — presumably intentional; verify.
        """
        if end_date == None:
            end_date = Data_API.Pricing_Database.pricing_date
        if isinstance(start_date,int):
            start_date = Data_API.add_pricing_date(start_date,in_place=False)
        if isinstance(end_date,int):
            end_date = Data_API.add_pricing_date(end_date,in_place=False)
        #for pnl
        if initial_portfolio == None:
            portfolio = Portfolio.Portfolio(initial_cash)
            portfolio1=Portfolio.Portfolio(initial_cash)
            portfolio2=Portfolio.Portfolio(initial_cash)
            portfolio3=Portfolio.Portfolio(initial_cash)
        else:
            portfolio = initial_portfolio
            portfolio1 = initial_portfolio
            portfolio2 = initial_portfolio
            portfolio3 = initial_portfolio
        #for information
        quant_index=[]
        quant_index1=[]
        quant_index2=[]
        quant_index3=[]
        self.find_tickers()
        self.find_used_vars()
        # Pre-warm the data cache for every ticker the command references.
        cache = Data_API.Cache()
        for ticker in self.tickers:
            cache.get_ticker_data(ticker)
        #set back the date.
        orig_pd = Data_API.Pricing_Database.pricing_date
        try:
            Data_API.set_pricing_date(start_date)
            while Data_API.Pricing_Database.pricing_date <= end_date:
                # The user command sees/mutates the locals defined above.
                exec(self.command)
                portfolio.record_pnl()
                portfolio1.record_pnl()
                portfolio2.record_pnl()
                portfolio3.record_pnl()
                Data_API.add_pricing_date(1)
            self.portfolio = portfolio
            self.portfolio1 = portfolio1
            self.portfolio2 = portfolio2
            self.portfolio3 = portfolio3
            self.quant_index = quant_index
            self.quant_index1=quant_index1
            self.quant_index2=quant_index2
            self.quant_index3=quant_index3
            Data_API.set_pricing_date(orig_pd)
            self.pnls = {'portfolio':self.portfolio.pnl_as_of_date,\
                         'portfolio1':self.portfolio1.pnl_as_of_date,\
                         'portfolio2':self.portfolio2.pnl_as_of_date,\
                         'portfolio3':self.portfolio3.pnl_as_of_date}
            self.quant_indices = {'quant_index':self.quant_index,\
                         'quant_index1':self.quant_index1,\
                         'quant_index2':self.quant_index2,\
                         'quant_index3':self.quant_index3}
        except Exception as e:
            # Always restore the global pricing date before propagating.
            Data_API.set_pricing_date(orig_pd)
            raise e
    def back_test_summary(self):
        """Return a text report of get_measures() for every portfolio
        variable the command actually used (requires back_test first)."""
        output = ""
        if "portfolio" in self.used_vars:
            output += """portfolio:\n""" + str(self.portfolio.get_measures()) + """\n"""
        if "portfolio1" in self.used_vars:
            output += """portfolio1:\n""" + str(self.portfolio1.get_measures()) + """\n"""
        if "portfolio2" in self.used_vars:
            output += """portfolio2:\n""" + str(self.portfolio2.get_measures()) + """\n"""
        if "portfolio3" in self.used_vars:
            output += """portfolio3:\n""" + str(self.portfolio3.get_measures()) + """\n"""
        return output
    def back_test_plot(self):
        """Plot PnL curves (left axis) and any quant indices (right axis)
        from the most recent back_test run. Imports matplotlib lazily so the
        dependency is only needed when plotting."""
        import matplotlib.pyplot as plt
        import matplotlib.dates as mdates
        fig = plt.figure()
        all_lines = []
        ax = fig.add_subplot(111)
        ax.set_ylabel('PnL')
        # A secondary y-axis is only created when the command used any
        # quant_index* variable.
        has_right_ax = False
        if 'quant_index' in self.used_vars or \
            'quant_index1' in self.used_vars or \
            'quant_index2' in self.used_vars or \
            'quant_index3' in self.used_vars:
            has_right_ax = True
        # pnl_as_of_date entries are (date, pnl) pairs.
        dates = [ x[0] for x in self.pnls['portfolio'] ]
        for v in self.used_vars:
            if 'portfolio' in v:
                all_lines += ax.plot(dates, [x[1] for x in self.pnls[v]],label=v,linewidth=1)
        if has_right_ax:
            right_ax = ax.twinx()
            for v in self.used_vars:
                if 'index' in v:
                    all_lines += right_ax.plot(dates, self.quant_indices[v],label=v,linewidth=1,ls='dotted')
            right_ax.set_ylabel('quant_index')
        # format the ticks
        years = mdates.YearLocator() # every year
        months = mdates.MonthLocator() # every month
        yearsFmt = mdates.DateFormatter('%Y')
        ax.xaxis.set_major_locator(years)
        ax.xaxis.set_major_formatter(yearsFmt)
        ax.xaxis.set_minor_locator(months)
        datemin = min(dates)
        datemax = max(dates)
        ax.set_xlim(datemin, datemax)
        ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
        ax.grid(True)
        # rotates and right aligns the x labels, and moves the bottom of the
        # axes up to make room for them
        fig.autofmt_xdate()
        fig.tight_layout()
        plt.legend(all_lines,[l.get_label() for l in all_lines],loc='best')
        plt.show()
| mit |
valkyriesavage/invenio | modules/bibexport/lib/bibexport.py | 17 | 5945 | ## -*- mode: python; coding: utf-8; -*-
##
## This file is part of Invenio.
## Copyright (C) 2008, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
BibExport daemon.
Usage: %s [options]
Scheduling options:
-u, --user=USER user name to store task, password needed
-s, --sleeptime=SLEEP time after which to repeat tasks (no)
e.g.: 1s, 30m, 24h, 7d
-t, --time=TIME moment for the task to be active (now)
e.g.: +15s, 5m, 3h , 2002-10-27 13:57:26
General options:
-h, --help print this help and exit
-V, --version print version and exit
-v, --verbose=LEVEL verbose level (from 0 to 9, default 1)
"""
__revision__ = "$Id$"
import os
import sys
from ConfigParser import ConfigParser
from invenio.config import CFG_ETCDIR
from invenio.dbquery import run_sql
from invenio.bibtask import task_init, write_message, task_set_option, \
task_get_option, task_has_option, task_get_task_param
def _detect_jobs_to_run(string_of_jobnames=None):
    """Build the list of job names to run.

    Start from the optional comma-separated STRING_OF_JOBNAMES, then append
    every job name found in the expJOB table. Return the combined list.
    """
    jobnames = string_of_jobnames.split(',') if string_of_jobnames else []
    # FIXME: pay attention to periodicity; extract only jobs needed to run
    jobnames.extend(row[0] for row in run_sql("SELECT jobname FROM expJOB"))
    return jobnames
def _detect_export_method(jobname):
    """Return the export method configured for JOBNAME.

    Reads <CFG_ETCDIR>/bibexport/<jobname>.cfg and returns its
    [export_job] export_method value, or None when the file is missing
    (an error is reported via write_message in that case).
    """
    config_path = CFG_ETCDIR + os.sep + 'bibexport' + os.sep + jobname + '.cfg'
    if not os.path.exists(config_path):
        write_message("ERROR: cannot find config file %s." % config_path, sys.stderr)
        return None
    parser = ConfigParser()
    parser.read(config_path)
    return parser.get('export_job', 'export_method')
def _update_job_lastrun_time(jobname):
    """Update expJOB table and set lastrun time of JOBNAME to the task
    starting time."""
    # 'task_starting_time' is the bibtask parameter recorded when the
    # scheduled task was launched.
    run_sql("UPDATE expJOB SET lastrun=%s WHERE jobname=%s",
            (task_get_task_param('task_starting_time'), jobname,))
def task_run_core():
    """
    Runs the task by fetching arguments from the BibSched task queue. This is
    what BibSched will be invoking via daemon call.
    """
    # Keep going after individual job failures; report overall success only
    # when every job ran cleanly.
    errors_encountered_p = False
    jobnames = _detect_jobs_to_run(task_get_option('wjob'))
    for jobname in jobnames:
        jobname_export_method = _detect_export_method(jobname)
        if not jobname_export_method:
            write_message("ERROR: cannot detect export method for job %s." % jobname, sys.stderr)
            errors_encountered_p = True
        else:
            try:
                # every bibexport method must define run_export_method() that will do the job
                # (Python 2 exec statement: dynamically imports it from
                # invenio.bibexport_method_<export_method> into local scope)
                exec "from invenio.bibexport_method_%s import run_export_method" % jobname_export_method
                write_message("started export job " + jobname, verbose=3)
                # pylint: disable=E0602
                # The import is done via the exec command 2 lines above.
                run_export_method(jobname)
                # pylint: enable=E0602
                _update_job_lastrun_time(jobname)
                write_message("finished export job " + jobname, verbose=3)
            except Exception, msg:
                write_message("ERROR: cannot run export job %s: %s." % (jobname, msg), sys.stderr)
                errors_encountered_p = True
    return not errors_encountered_p
def task_submit_check_options():
    """Validate submitted options: every job name given via --wjob must
    exist in the expJOB table. Return True when valid, False otherwise."""
    if not task_has_option('wjob'):
        return True
    jobnames = task_get_option('wjob')
    if not jobnames:
        return True
    for jobname in jobnames.split(','):
        res = run_sql("SELECT COUNT(*) FROM expJOB WHERE jobname=%s", (jobname,))
        if not (res and res[0][0]):
            # Unknown job name -> refuse the task submission.
            write_message("Sorry, job name %s is not known. Exiting." % jobname)
            return False
    return True
def task_submit_elaborate_specific_parameter(key, value, opts, args):
    """Handle the bibexport-specific -w/--wjob option; return True when the
    option was consumed, False so bibtask can process anything else."""
    if key not in ("-w", "--wjob"):
        return False
    task_set_option("wjob", value)
    return True
def main():
    """Main function that constructs full bibtask."""
    # Wire this module's callbacks into the bibtask framework: option
    # parsing, option validation and the core run function.
    task_init(authorization_action='runbibexport',
              authorization_msg="BibExport Task Submission",
              help_specific_usage="""Export options:
  -w, --wjob=j1[,j2]\tRun specific exporting jobs j1, j2, etc (e.g. 'sitemap').
""",
              version=__revision__,
              specific_params=("w:", ["wjob=",]),
              task_submit_elaborate_specific_parameter_fnc=task_submit_elaborate_specific_parameter,
              task_submit_check_options_fnc=task_submit_check_options,
              task_run_fnc=task_run_core)
if __name__ == "__main__":
    # NOTE(review): this extra call looks like leftover debugging (its
    # return value is discarded) — confirm whether it is still needed.
    _detect_export_method("sitemap")
    main()
| gpl-2.0 |
kriswuollett/grpc | src/python/grpcio_tests/tests/unit/_from_grpc_import_star.py | 38 | 1752 | # Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Snapshot the module globals before the star-import below so the names that
# `from grpc import *` introduces can be computed by difference.
_BEFORE_IMPORT = tuple(globals())
from grpc import *
_AFTER_IMPORT = tuple(globals())
# Every name the star-import added (excluding the snapshot variable itself);
# presumably consumed by unit tests checking grpc's public surface — verify.
GRPC_ELEMENTS = tuple(
    element for element in _AFTER_IMPORT
    if element not in _BEFORE_IMPORT and element != '_BEFORE_IMPORT')
| bsd-3-clause |
vmax-feihu/hue | desktop/core/ext-py/tablib-0.10.0/tablib/packages/odf3/config.py | 56 | 1420 | # -*- coding: utf-8 -*-
# Copyright (C) 2006-2007 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
from .namespaces import CONFIGNS
from .element import Element
# Autogenerated
def ConfigItem(**args):
    """Create a <config:config-item> element."""
    return Element(qname=(CONFIGNS, 'config-item'), **args)
def ConfigItemMapEntry(**args):
    """Create a <config:config-item-map-entry> element."""
    return Element(qname=(CONFIGNS, 'config-item-map-entry'), **args)
def ConfigItemMapIndexed(**args):
    """Create a <config:config-item-map-indexed> element."""
    return Element(qname=(CONFIGNS, 'config-item-map-indexed'), **args)
def ConfigItemMapNamed(**args):
    """Create a <config:config-item-map-named> element."""
    return Element(qname=(CONFIGNS, 'config-item-map-named'), **args)
def ConfigItemSet(**args):
    """Create a <config:config-item-set> element."""
    return Element(qname=(CONFIGNS, 'config-item-set'), **args)
| apache-2.0 |
maxkoryukov/route4me-python-sdk | setup.py | 1 | 2522 | # -*- coding: utf-8 -*-
import os
from setuptools import setup
from setuptools import find_packages
from VERSION import PROJECT
from VERSION import COPYRIGHT
from VERSION import AUTHOR
from VERSION import TITLE
from VERSION import LICENSE
from VERSION import RELEASE_STRING
# Directory containing this setup.py; read_all() resolves files against it.
cwd = os.path.dirname(__file__)
def read_all(file_name):
    """Return the entire contents of FILE_NAME, resolved next to setup.py."""
    full_path = os.path.join(cwd, file_name)
    with open(full_path) as handle:
        return handle.read()
def rewrite_version():
    """Copy the top-level VERSION.py into route4me/sdk/version.py so the
    installed package carries the same version metadata."""
    with open('VERSION.py', 'r') as source:
        contents = source.read()
    destination = os.path.join('route4me', 'sdk', 'version.py')
    with open(destination, 'w') as target:
        target.write(contents)
# Sync version metadata into the package before packaging.
rewrite_version()
# NOTE(review): 'bugtrack_url' and 'copyright' are not standard setuptools
# setup() keywords — verify they are intentionally passed through.
setup(
    name=TITLE,
    version=RELEASE_STRING,
    url='https://github.com/route4me/route4me-python-sdk',
    bugtrack_url='https://github.com/route4me/route4me-python-sdk/issues',
    license=LICENSE,
    copyright=COPYRIGHT,
    author=AUTHOR,
    author_email='python-team@route4me.com',
    description=PROJECT,
    long_description=read_all('README.rst'),
    keywords='route4me, python, sdk, api',
    packages=find_packages(
        include=['route4me.sdk', 'route4me.sdk.*'],
        exclude=['*_test*'],
    ),
    zip_safe=True,
    platforms='any',
    classifiers=[
        # 'Development Status :: 1 - Planning',
        # 'Development Status :: 2 - Pre-Alpha',
        # 'Development Status :: 3 - Alpha',
        # 'Development Status :: 4 - Beta',
        # 'Development Status :: 5 - Production/Stable',
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Environment :: Other Environment',
        'License :: OSI Approved :: ISC License (ISCL)',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: Implementation',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ],
    test_suite='pytest',
    install_requires=[
        'six ==1.10.0',
        'requests ==2.18.4',
        'enum34 ==1.1.6',
        'pydash ==4.1.0',
        'arrow ==0.10.0',
    ],
    # include_package_data=True,
    # extras_require={
    # 	'dev': REQUIREMENTS_DEV,
    # },
    # entry_points='''
    # [console_scripts]
    # flask=flask.cli:main
    # '''
)
| isc |
mrquim/mrquimrepo | plugin.video.poseidon/resources/lib/modules/sources.py | 4 | 51579 | # -*- coding: utf-8 -*-
'''
Poseidon Add-on
Copyright (C) 2017 Poseidon
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import sys,pkgutil,re,json,urllib,urlparse,random,datetime,time
from resources.lib.modules import dialogs, dialogs_list
from resources.lib.modules.executor import execute
from schism_commons import cleantitle_get
from resources.lib.modules import control
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import debrid
from resources.lib.modules import workers
from resources.lib.modules import unshorten
import nanscrapers
# Add-on setting read once at import time (value semantics not visible here).
debridstatus = control.setting('debridsources')
import os
from threading import Event
import xbmc
import xbmcaddon
import xbmcvfs
try: from sqlite3 import dbapi2 as database
except: from pysqlite2 import dbapi2 as database
try: import urlresolver
except: pass
try: import xbmc
except: pass
# URL-shortener domains given special handling (plain substrings, despite
# the '_regex' name).
_shst_regex = ['sh.st','viid.me']
class sources:
    def __init__(self):
        # getConstants() is defined elsewhere in this class (not shown in
        # this chunk); presumably initialises shared lookup data — verify.
        self.getConstants()
        # Accumulator for scraper results.
        self.sources = []
    def play(self, title, year, imdb, tvdb, season, episode, tvshowtitle, premiered, meta, select):
        """Scrape sources for the item and start playback.

        'select' overrides the 'hosts.mode' setting: directory listing ('1'
        inside a plugin container), selection dialog ('0'/'1'/'3'/'4'), or
        autoplay of the first resolvable source (anything else).
        Errors are swallowed by design (Kodi add-on best-effort behaviour).
        """
        try:
            url = None
            items = self.getSources(title, year, imdb, tvdb, season, episode, tvshowtitle, premiered)
            select = control.setting('hosts.mode') if select == None else select
            title = tvshowtitle if not tvshowtitle == None else title
            # PseudoTV wants a direct resolved path, no UI.
            if control.window.getProperty('PseudoTVRunning') == 'True':
                return control.resolve(int(sys.argv[1]), True, control.item(path=str(self.sourcesDirect(items))))
            if len(items) > 0:
                if select == '1' and 'plugin' in control.infoLabel('Container.PluginName'):
                    # Hand the source list to the addItem directory view via
                    # window properties, then navigate to it.
                    control.window.clearProperty(self.itemProperty)
                    control.window.setProperty(self.itemProperty, json.dumps(items))
                    control.window.clearProperty(self.metaProperty)
                    control.window.setProperty(self.metaProperty, meta)
                    control.sleep(200)
                    return control.execute('Container.Update(%s?action=addItem&title=%s)' % (sys.argv[0], urllib.quote_plus(title.encode('utf-8'))))
                elif select == '0' or select == '1' or select == '3' or select == '4':
                    url = self.sourcesDialog(items)
                else:
                    url = self.sourcesDirect(items)
            if url == None:
                return self.errorForSources()
            meta = json.loads(meta)
            from resources.lib.modules.player import player
            player().run(title, year, season, episode, imdb, tvdb, url, meta)
        except:
            pass
    def play_alter(self, title, year, imdb, tvdb, season, episode, tvshowtitle, premiered, meta):
        """Variant of play(): derives 'select' from the 'hosts.mode'
        setting instead of a caller argument ('2' -> directory view,
        otherwise a mode that falls through to the selection dialog)."""
        try:
            url = None
            items = self.getSources(title, year, imdb, tvdb, season, episode, tvshowtitle, premiered)
            if control.setting('hosts.mode') == '2': select = "1"
            else: select = "2"
            title = tvshowtitle if not tvshowtitle == None else title
            if control.window.getProperty('PseudoTVRunning') == 'True':
                return control.resolve(int(sys.argv[1]), True, control.item(path=str(self.sourcesDirect(items))))
            if len(items) > 0:
                if select == '1' and 'plugin' in control.infoLabel('Container.PluginName'):
                    # Same window-property handoff as play().
                    control.window.clearProperty(self.itemProperty)
                    control.window.setProperty(self.itemProperty, json.dumps(items))
                    control.window.clearProperty(self.metaProperty)
                    control.window.setProperty(self.metaProperty, meta)
                    control.sleep(200)
                    return control.execute('Container.Update(%s?action=addItem&title=%s)' % (sys.argv[0], urllib.quote_plus(title.encode('utf-8'))))
                elif select == '0' or select == '1' or select == '3' or select == '4' or select == '5':
                    url = self.sourcesDialog(items)
                else:
                    url = self.sourcesDirect(items)
            if url == None:
                return self.errorForSources()
            meta = json.loads(meta)
            from resources.lib.modules.player import player
            player().run(title, year, season, episode, imdb, tvdb, url, meta)
        except:
            pass
    def play_dialog(self, title, year, imdb, tvdb, season, episode, tvshowtitle, premiered, meta, select):
        """Resolve sources one by one behind a progress dialog and play the
        first that resolves; on total failure show the no-sources error.

        Resolution happens on a worker thread (sourcesResolve); the polling
        loops below wait on it while tolerating captcha/login popups
        (virtualkeyboard / yesnoDialog windows). The resolved URL is read
        from self.url — presumably set by sourcesResolve; verify.
        """
        try:
            url = None
            items = self.getSource_dialog(title, year, imdb, tvdb, season, episode, tvshowtitle, premiered)
            title = tvshowtitle if not tvshowtitle == None else title
            header = control.addonInfo('name')
            header2 = header.upper()
            try: meta = json.loads(meta)
            except: meta = ''
            progressDialog = control.progressDialog if control.setting('progress.dialog') == '0' else control.progressDialogBG
            progressDialog.create(header, '')
            progressDialog.update(0)
            # NOTE(review): 'filter' shadows the builtin and is never used.
            filter = []
            for i in range(len(items)):
                try:
                    try:
                        label = '[B]%s[/B] | %s | [B][I]%s [/I][/B]' % (items[i]['scraper'], items[i]['source'], items[i]['quality'])
                        if progressDialog.iscanceled(): break
                        progressDialog.update(int((100 / float(len(items))) * i), label.upper(), '')
                    except:
                        # Background-dialog variant takes (pct, heading, msg).
                        progressDialog.update(int((100 / float(len(items))) * i), str(header2), label.upper())
                    # if items[i]['source'] == block: raise Exception()
                    w = workers.Thread(self.sourcesResolve, items[i])
                    w.start()
                    m = ''
                    # Poll up to ~30 min; 'm' flags that a popup was seen so
                    # the grace loop below keeps waiting after the timeout.
                    for x in range(3600):
                        try:
                            if xbmc.abortRequested == True: return sys.exit()
                            if progressDialog.iscanceled(): return progressDialog.close()
                        except:
                            pass
                        k = control.condVisibility('Window.IsActive(virtualkeyboard)')
                        if k: m += '1'; m = m[-1]
                        if (w.is_alive() == False or x > 30) and not k: break
                        k = control.condVisibility('Window.IsActive(yesnoDialog)')
                        if k: m += '1'; m = m[-1]
                        if (w.is_alive() == False or x > 30) and not k: break
                        time.sleep(0.5)
                    # Grace period (~15s) after a popup was dismissed.
                    for x in range(30):
                        try:
                            if xbmc.abortRequested == True: return sys.exit()
                            if progressDialog.iscanceled(): return progressDialog.close()
                        except:
                            pass
                        if m == '': break
                        if w.is_alive() == False: break
                        time.sleep(0.5)
                    if w.is_alive() == True: block = items[i]
                    if self.url == None: raise Exception()
                    try: progressDialog.close()
                    except: pass
                    control.sleep(200)
                    control.execute('Dialog.Close(virtualkeyboard)')
                    control.execute('Dialog.Close(yesnoDialog)')
                    from resources.lib.modules.player import player
                    player().run(title, year, season, episode, imdb, tvdb, self.url, meta)
                    return self.url
                except:
                    # This source failed to resolve; try the next one.
                    pass
            try: progressDialog.close()
            except: pass
            self.errorForSources()
        except:
            pass
    def play_dialog_list(self, title, year, imdb, tvdb, season, episode, tvshowtitle, premiered, meta, select):
        """Like play(), but always presents the alternate source-selection
        dialog (sourcesDialog2) regardless of the hosts.mode setting."""
        try:
            url = None
            items = self.getSources(title, year, imdb, tvdb, season, episode, tvshowtitle, premiered)
            select = control.setting('hosts.mode') if select == None else select
            title = tvshowtitle if not tvshowtitle == None else title
            if control.window.getProperty('PseudoTVRunning') == 'True':
                return control.resolve(int(sys.argv[1]), True, control.item(path=str(self.sourcesDirect(items))))
            if len(items) > 0: url = self.sourcesDialog2(items)
            if url == None: return self.errorForSources()
            meta = json.loads(meta)
            from resources.lib.modules.player import player
            player().run(title, year, season, episode, imdb, tvdb, url, meta)
        except:
            pass
    def play_library(self, title, year, imdb, tvdb, season, episode, tvshowtitle, premiered, meta, select):
        """Like play(), but for library items: the sentinel string
        'play_library' is passed to the player instead of parsed metadata."""
        try:
            url = None
            items = self.getSources(title, year, imdb, tvdb, season, episode, tvshowtitle, premiered)
            select = control.setting('hosts.mode') if select == None else select
            title = tvshowtitle if not tvshowtitle == None else title
            if control.window.getProperty('PseudoTVRunning') == 'True':
                return control.resolve(int(sys.argv[1]), True, control.item(path=str(self.sourcesDirect(items))))
            if len(items) > 0:
                if select == '1' and 'plugin' in control.infoLabel('Container.PluginName'):
                    # Same window-property handoff as play().
                    control.window.clearProperty(self.itemProperty)
                    control.window.setProperty(self.itemProperty, json.dumps(items))
                    control.window.clearProperty(self.metaProperty)
                    control.window.setProperty(self.metaProperty, meta)
                    control.sleep(200)
                    return control.execute('Container.Update(%s?action=addItem&title=%s)' % (sys.argv[0], urllib.quote_plus(title.encode('utf-8'))))
                elif select == '0' or select == '1':
                    url = self.sourcesDialog(items)
                else:
                    url = self.sourcesDirect(items)
            if url == None:
                return self.errorForSources()
            meta = 'play_library'
            from resources.lib.modules.player import player
            player().run(title, year, season, episode, imdb, tvdb, url, meta)
        except:
            pass
    def addItem(self, title):
        """Render the source list (stashed in window properties by play())
        as a Kodi directory: one playable list item per source, optionally
        with a download context-menu entry."""
        control.playlist.clear()
        items = control.window.getProperty(self.itemProperty)
        items = json.loads(items)
        if items == None or len(items) == 0: control.idle() ; sys.exit()
        meta = control.window.getProperty(self.metaProperty)
        meta = json.loads(meta)
        sysaddon = sys.argv[0]
        syshandle = int(sys.argv[1])
        # Downloads are offered only when enabled and both download paths
        # are configured.
        downloads = True if control.setting('downloads') == 'true' and not (control.setting('movie.download.path') == '' or control.setting('tv.download.path') == '') else False
        # Display name: "Show S01E02" for episodes, "Title (Year)" for movies.
        if 'tvshowtitle' in meta and 'season' in meta and 'episode' in meta:
            name = '%s S%02dE%02d' % (title, int(meta['season']), int(meta['episode']))
        elif 'year' in meta:
            name = '%s (%s)' % (title, meta['year'])
        else:
            name = title
        systitle = urllib.quote_plus(title.encode('utf-8'))
        sysname = urllib.quote_plus(name.encode('utf-8'))
        # '0' is the sentinel for "no artwork"; fall back through the
        # add-on's default art.
        poster = meta['poster'] if 'poster' in meta else '0'
        banner = meta['banner'] if 'banner' in meta else '0'
        thumb = meta['thumb'] if 'thumb' in meta else poster
        fanart = meta['fanart'] if 'fanart' in meta else '0'
        if poster == '0': poster = control.addonPoster()
        if banner == '0' and poster == '0': banner = control.addonBanner()
        elif banner == '0': banner = poster
        if thumb == '0' and fanart == '0': thumb = control.addonFanart()
        elif thumb == '0': thumb = fanart
        if control.setting('fanart') == 'true' and not fanart == '0': pass
        else: fanart = control.addonFanart()
        sysimage = urllib.quote_plus(poster.encode('utf-8'))
        downloadMenu = control.lang(32403).encode('utf-8')
        for i in range(len(items)):
            try:
                label = items[i]['label']
                # Each list entry routes back into this plugin's playItem
                # action with its single source serialized in the URL.
                syssource = urllib.quote_plus(json.dumps([items[i]]))
                sysurl = '%s?action=playItem&title=%s&source=%s' % (sysaddon, systitle, syssource)
                cm = []
                if downloads == True:
                    cm.append((downloadMenu, 'RunPlugin(%s?action=download&name=%s&image=%s&source=%s)' % (sysaddon, sysname, sysimage, syssource)))
                item = control.item(label=label)
                item.setArt({'icon': thumb, 'thumb': thumb, 'poster': poster, 'tvshow.poster': poster, 'season.poster': poster, 'banner': banner, 'tvshow.banner': banner, 'season.banner': banner})
                if not fanart == None: item.setProperty('Fanart_Image', fanart)
                item.addContextMenuItems(cm)
                item.setInfo(type='Video', infoLabels = meta)
                control.addItem(handle=syshandle, url=sysurl, listitem=item, isFolder=False)
            except:
                pass
        control.content(syshandle, 'files')
        control.directory(syshandle, cacheToDisc=True)
def playItem(self, title, source):
try:
meta = control.window.getProperty(self.metaProperty)
meta = json.loads(meta)
year = meta['year'] if 'year' in meta else None
season = meta['season'] if 'season' in meta else None
episode = meta['episode'] if 'episode' in meta else None
imdb = meta['imdb'] if 'imdb' in meta else None
tvdb = meta['tvdb'] if 'tvdb' in meta else None
next = [] ; prev = [] ; total = []
for i in range(1,1000):
try:
u = control.infoLabel('ListItem(%s).FolderPath' % str(i))
if u in total: raise Exception()
total.append(u)
u = dict(urlparse.parse_qsl(u.replace('?','')))
u = json.loads(u['source'])[0]
next.append(u)
except:
break
for i in range(-1000,0)[::-1]:
try:
u = control.infoLabel('ListItem(%s).FolderPath' % str(i))
if u in total: raise Exception()
total.append(u)
u = dict(urlparse.parse_qsl(u.replace('?','')))
u = json.loads(u['source'])[0]
prev.append(u)
except:
break
items = json.loads(source)
items = [i for i in items+next+prev][:40]
header = control.addonInfo('name')
header2 = header.upper()
progressDialog = control.progressDialog if control.setting('progress.dialog') == '0' else control.progressDialogBG
progressDialog.create(header, '')
progressDialog.update(0)
block = None
for i in range(len(items)):
try:
try:
if progressDialog.iscanceled(): break
progressDialog.update(int((100 / float(len(items))) * i), str(items[i]['label']), str(' '))
except:
progressDialog.update(int((100 / float(len(items))) * i), str(header2), str(items[i]['label']))
if items[i]['source'] == block: raise Exception()
w = workers.Thread(self.sourcesResolve, items[i])
w.start()
m = ''
for x in range(3600):
try:
if xbmc.abortRequested == True: return sys.exit()
if progressDialog.iscanceled(): return progressDialog.close()
except:
pass
k = control.condVisibility('Window.IsActive(virtualkeyboard)')
if k: m += '1'; m = m[-1]
if (w.is_alive() == False or x > 30) and not k: break
k = control.condVisibility('Window.IsActive(yesnoDialog)')
if k: m += '1'; m = m[-1]
if (w.is_alive() == False or x > 30) and not k: break
time.sleep(0.5)
for x in range(30):
try:
if xbmc.abortRequested == True: return sys.exit()
if progressDialog.iscanceled(): return progressDialog.close()
except:
pass
if m == '': break
if w.is_alive() == False: break
time.sleep(0.5)
if w.is_alive() == True: block = items[i]['source']
if self.url == None: raise Exception()
try: progressDialog.close()
except: pass
control.sleep(200)
control.execute('Dialog.Close(virtualkeyboard)')
control.execute('Dialog.Close(yesnoDialog)')
from resources.lib.modules.player import player
player().run(title, year, season, episode, imdb, tvdb, self.url, meta)
return self.url
except:
pass
try: progressDialog.close()
except: pass
self.errorForSources()
except:
pass
def getSource_dialog(self, title, year, imdb, tvdb, season, episode, tvshowtitle, premiered, presetDict=[], timeout=30):
self.__scrapers = []
sourceDict = []
for pkg, name, is_pkg in pkgutil.walk_packages(__path__): sourceDict.append((name, is_pkg))
sourceDict = [i[0] for i in sourceDict if i[1] == False]
sourceDict = [(i, __import__(i, globals(), locals(), [], -1).source()) for i in sourceDict]
content = 'movie' if tvshowtitle == None else 'episode'
if content == 'movie':
sourceDict = [(i[0], i[1], getattr(i[1], 'movie', None)) for i in sourceDict]
else:
sourceDict = [(i[0], i[1], getattr(i[1], 'tvshow', None)) for i in sourceDict]
sourceDict = [(i[0], i[1]) for i in sourceDict if not i[2] == None]
try: sourceDict = [(i[0], i[1], control.setting('provider.' + i[0])) for i in sourceDict]
except: sourceDict = [(i[0], i[1], 'true') for i in sourceDict]
self.__scrapers = [i[1] for i in sourceDict if not i[2] == 'false']
self.title = title
self.year = year
self.imdb = imdb
self.tvdb = tvdb
self.season = season
self.episode = episode
self.tvshowtitle = tvshowtitle
self.premiered = premiered
print ("POSEIDON SELFSCRAPERS", self.__scrapers)
sourceDict = [i[0] for i in sourceDict if not i[2] == 'false']
threads = []
select_sources = []
if control.setting('cachesources') == 'true':
control.makeFile(control.dataPath)
self.sourceFile = control.providercacheFile
if content == 'movie':
scraped_sources = self.scrape_movie_with_dialog()
else:
scraped_sources = self.scrape_tv_with_dialog()
for item in scraped_sources:
if type(item) == tuple:
item = item[1]
if type(item) == list:
for subitem in item:
select_sources.extend(item)
else:
select_sources.append(item)
return select_sources
def scrape_tv_with_dialog(self, maximum_age=60, sort_function=None):
    """Scrape episode sources through nanscrapers' selection dialog.

    Reads show metadata (tvshowtitle/year/premiered/season/episode/imdb/tvdb)
    from instance attributes set by getSource_dialog.

    Returns:
        list: the user-chosen link first, followed by the remaining
        alternative links returned by the scraper (extended mode).
    """
    try:
        timeout = int(control.setting('scrapers.timeout.1'))
    except:
        # Bug fix: `timeout` was previously left undefined when the setting
        # was missing or non-numeric, raising NameError on the next line.
        timeout = 30
    self.timeout = timeout
    allow_debrid = control.setting("debridsources") == "true"
    link, rest = nanscrapers.scrape_episode_with_dialog(
        self.tvshowtitle,
        self.year,
        self.premiered,
        self.season,
        self.episode,
        self.imdb,
        self.tvdb,
        timeout=self.timeout,
        extended=True,
        sort_function=self.sort_function,
        enable_debrid=allow_debrid)
    # nanscrapers may wrap the chosen link in a dict keyed by "path".
    if type(link) == dict and "path" in link:
        link = link["path"]
    result = [link]
    result.extend(rest)
    return result
def scrape_movie_with_dialog(self, maximum_age=60, sort_function=None):
    """Scrape movie sources through nanscrapers' selection dialog.

    Reads title/year/imdb from instance attributes set by getSource_dialog.

    Returns:
        list: the user-chosen link first, followed by the remaining
        alternative links returned by the scraper (extended mode).
    """
    try:
        timeout = int(control.setting('scrapers.timeout.1'))
    except:
        # Bug fix: `timeout` was previously left undefined when the setting
        # was missing or non-numeric, raising NameError on the next line.
        timeout = 30
    self.timeout = timeout
    allow_debrid = control.setting("debridsources") == "true"
    link, rest = nanscrapers.scrape_movie_with_dialog(
        self.title,
        self.year,
        self.imdb,
        timeout=self.timeout,
        extended=True,
        sort_function=self.sort_function,
        enable_debrid=allow_debrid)
    # nanscrapers may wrap the chosen link in a dict keyed by "path".
    if type(link) == dict and "path" in link:
        link = link["path"]
    result = [link]
    result.extend(rest)
    return result
def to_dialog_tuple(self, scraper_array):
    """Filter raw scraper links for the source-selection dialog.

    Keeps only links with a non-empty 'url' whose URL does not contain any
    blacklisted host substring from self.hostBlackList.  Malformed links
    (e.g. missing the 'url' key) are silently dropped.

    Args:
        scraper_array: iterable of link dicts, or a falsy value.

    Returns:
        list: the surviving link dicts in their original order.
    """
    results_array = []
    if scraper_array:
        for link in scraper_array:
            try:
                url = link['url']
                # Bug fix: the original test `not url == '' or url == None`
                # parsed as `(not url == '') or (url == None)`, so a None url
                # slipped through to the blacklist check (raising a swallowed
                # TypeError).  Also removed the dead quality2/label strings
                # that were computed but never used.
                if not url:
                    continue
                if any(value in url for value in self.hostBlackList):
                    continue
                results_array.append(link)
            except:
                pass
    return results_array
def getSources(self, title, year, imdb, tvdb, season, episode, tvshowtitle, premiered, presetDict=[], timeout=30):
progressDialog = control.progressDialog if control.setting('progress.dialog') == '0' else control.progressDialogBG
progressDialog.create(control.addonInfo('name'), '')
progressDialog.update(0, 'Sources Incoming...')
# if control.setting('cachesources') == 'true': self.prepareSources()
content = 'movie' if tvshowtitle is None else 'episode'
try:
timeout = int(control.setting('scrapers.timeout.1'))
except:
pass
allow_debrid = control.setting("debridsources") == "true"
if control.setting('cachesources') == 'true':
control.makeFile(control.dataPath)
self.sourceFile = control.providercacheFile
if content == 'movie':
title = self.getTitle(title)
scraper = nanscrapers.scrape_movie
links_scraper = scraper(
title,
year,
imdb,
timeout=timeout,
enable_debrid=allow_debrid)
else:
tvshowtitle = self.getTitle(tvshowtitle)
scraper = nanscrapers.scrape_episode
links_scraper = scraper(
tvshowtitle,
year,
premiered,
season,
episode,
imdb,
tvdb,
timeout=timeout,
enable_debrid=allow_debrid)
thread = workers.Thread(self.get_nan_sources, links_scraper,
progressDialog)
thread.start()
for i in range(0, timeout * 2):
try:
if xbmc.abortRequested:
return sys.exit()
try:
if progressDialog.iscanceled():
break
except:
pass
if not thread.is_alive(): break
time.sleep(0.5)
except:
pass
try:
progressDialog.close()
except:
pass
self.sourcesFilter()
return self.sources
def get_nan_sources(self, links_scraper, progressDialog):
num_scrapers = len(nanscrapers.relevant_scrapers())
index = 0
string1 = "Time Elapsed %s"
string2 = control.lang(32405).encode('utf-8')
string3 = control.lang(32406).encode('utf-8')
counthd = 0
count1080 = 0
countSD = 0
for scraper_links in links_scraper():
try:
if xbmc.abortRequested:
return sys.exit()
if progressDialog.iscanceled():
break
index = index + 1
percent = int((index * 100) / num_scrapers)
if scraper_links is not None:
random.shuffle(scraper_links)
for scraper_link in scraper_links:
try:
q = scraper_link['quality']
if "1080" in q:
count1080 += 1
elif "HD" in q:
counthd += 1
elif "720" in q:
counthd += 1
scraper_link["quality"] = "HD"
elif "720" in q:
counthd += 1
scraper_link["quality"] = "HD"
elif "560" in q:
counthd += 1
scraper_link["quality"] = "HD"
else:
countSD += 1
except:
pass
progressDialog.update(percent,
"[B]1080: [/B] " + str(count1080) + " [B] HD: [/B]" + str(counthd) + " [B] SD: [/B]" + str(countSD) + " (" + str(len(self.sources)) + ")",
string3 % (num_scrapers - index))
self.sources.append(scraper_link)
try:
if progressDialog.iscanceled():
break
except:
pass
except:
pass
def prepareSources(self):
    """Ensure the add-on data directory exists and record the provider
    cache DB path on this instance; failures are deliberately swallowed
    because the cache is optional."""
    try:
        control.makeFile(control.dataPath)
        self.sourceFile = control.providercacheFile
    except:
        pass
def getTitle(self, title):
    """Return `title` normalized via cleantitle.normalize for scraper matching."""
    return cleantitle.normalize(title)
def getMovieSource(self, title, year, imdb, source, call):
    """Run a single provider's movie pipeline and append its results to
    self.sources.  Each stage swallows its own failures so one broken
    provider never aborts the overall scrape."""
    # NOTE(review): `cleantitle_get` is not defined in this module's visible
    # scope — presumably imported elsewhere (cleantitle.get?); confirm,
    # since a NameError here would abort before the try blocks.
    source = cleantitle_get(str(source))
    type = "movie"  # shadows the builtin and is unused below
    try:
        url = None
        if url == None: url = call.movie(imdb, title, year)
        if url == None: raise Exception()
    except:
        pass
    try:
        sources = []
        sources = call.sources(url, self.hostDict, self.hostprDict)
        if sources == None: raise Exception()
        self.sources.extend(sources)
    except:
        pass
def getEpisodeSource(self, title, year, imdb, tvdb, season, episode, tvshowtitle, premiered, source, call):
    """Run one provider's tvshow -> episode -> sources pipeline, appending
    any results to self.sources.  Every stage swallows its own failures,
    so a provider error never aborts the overall scrape."""
    # NOTE(review): `cleantitle_get` is not defined in the visible scope;
    # confirm it is imported elsewhere, otherwise this raises NameError.
    source = cleantitle_get(str(source))
    try:
        url = None
        if url == None: url = call.tvshow(imdb, tvdb, tvshowtitle, year)
        if url == None: raise Exception()
    except:
        pass
    try:
        ep_url = None
        # If the show lookup above failed, url is still None and we skip.
        if url == None: raise Exception()
        if ep_url == None: ep_url = call.episode(url, imdb, tvdb, title, premiered, season, episode)
        if ep_url == None: raise Exception()
    except:
        pass
    try:
        sources = []
        sources = call.sources(ep_url, self.hostDict, self.hostprDict)
        if sources == None: raise Exception()
        self.sources.extend(sources)
    except:
        pass
def getMovieSource2(self, title, year, imdb, source, call):
str_call = str(call)
r = re.findall('resources.lib.sources.(.+?).source', str_call)[0]
if r:
source = r
else: source = "Poseidon"
type = "movie"
try:
url = None
if url == None: url = call.movie(imdb, title, year)
if url == None: raise Exception()
except:
pass
try:
sources = []
sources = call.sources(url, self.hostDict, self.hostprDict)
if sources == None: raise Exception()
self.sources.extend(sources)
except:
pass
return sources
def getEpisodeSource2(self, title, year, imdb, tvdb, season, episode, tvshowtitle, premiered, source, call):
str_call = str(call)
r = re.findall('resources.lib.sources.(.+?).source', str_call)[0]
if r:
source = r
else: source = "Poseidon"
type = "episode"
try:
url = None
if url == None: url = call.tvshow(imdb, tvdb, tvshowtitle, year)
if url == None: raise Exception()
except:
pass
try:
ep_url = None
if url == None: raise Exception()
if ep_url == None: ep_url = call.episode(url, imdb, tvdb, title, premiered, season, episode)
if ep_url == None: raise Exception()
except:
pass
try:
sources = []
sources = call.sources(ep_url, self.hostDict, self.hostprDict)
if sources == None: raise Exception()
self.sources.extend(sources)
except:
pass
return sources
def getURISource(self, url):
    """Resolve a raw URL by matching its domain against the provider
    modules in this package.

    Returns the filtered sources list on success, False when no provider
    handles the domain, or None if an exception occurred."""
    try:
        sourceDict = []
        # Enumerate the non-package provider modules in this package.
        for package, name, is_pkg in pkgutil.walk_packages(__path__): sourceDict.append((name, is_pkg))
        sourceDict = [i[0] for i in sourceDict if i[1] == False]
        # NOTE(review): level=-1 (implicit relative import) is Python 2 only.
        sourceDict = [(i, __import__(i, globals(), locals(), [], -1).source()) for i in sourceDict]
        domain = (urlparse.urlparse(url).netloc).lower()
        # Pick the first provider whose declared domains match the URL host.
        domains = [(i[0], i[1].domains) for i in sourceDict]
        domains = [i[0] for i in domains if any(x in domain for x in i[1])]
        if len(domains) == 0: return False
        call = [i[1] for i in sourceDict if i[0] == domains[0]][0]
        self.sources = call.sources(url, self.hostDict, self.hostprDict)
        # Mark every source for autoplay before filtering.
        for i in range(len(self.sources)):
            try: self.sources[i]['autoplay'] = True
            except: pass
        self.sources = self.sourcesFilter()
        return self.sources
    except:
        pass
def alterSources(self, url, meta):
    """Re-launch the sources plugin URL with an explicit selection mode.

    Appends select=1 when the 'hosts.mode' setting is '2' (manual pick)
    and select=2 otherwise, then fires the URL via RunPlugin.  Any error
    is ignored.
    """
    try:
        mode = '1' if control.setting('hosts.mode') == '2' else '2'
        control.execute('RunPlugin(%s&select=%s)' % (url, mode))
    except:
        pass
def clearSources(self):
    """Prompt the user, then drop the provider cache table (rel_src) and
    VACUUM the cache database, finishing with a confirmation toast."""
    try:
        control.idle()
        # Localized yes/no confirmation (string id 32407).
        yes = control.yesnoDialog(control.lang(32407).encode('utf-8'), '', '')
        if not yes: return
        control.makeFile(control.dataPath)
        dbcon = database.connect(control.providercacheFile)
        dbcur = dbcon.cursor()
        dbcur.execute("DROP TABLE IF EXISTS rel_src")
        dbcur.execute("VACUUM")
        dbcon.commit()
        # Success toast (string id 32408).
        control.infoDialog(control.lang(32408).encode('utf-8'), sound=True, icon='INFO')
    except:
        pass
def sourcesFilter(self):
provider = control.setting('hosts.sort.provider')
quality = control.setting('hosts.quality')
if quality == '':
quality = '0'
captcha = control.setting('hosts.captcha')
random.shuffle(self.sources)
if provider == 'true':
self.sources = sorted(self.sources, key=lambda k: k['scraper'])
local = [i for i in self.sources if 'local' in i and i.get('local', False) == True]
self.sources = [i for i in self.sources if not i in local]
filter = []
filter += [i for i in self.sources if i['direct'] == True]
filter += [i for i in self.sources if i['direct'] == False]
self.sources = filter
filter = []
filter += [i for i in self.sources if not i['source'].lower() in self.hostBlackList]
self.sources = filter
filter = []
filter += local
if quality in ['0']: filter += [i for i in self.sources if i['quality'] == '4k' and i.get('debridonly', False) == True]
if quality in ['0']: filter += [i for i in self.sources if i['quality'] == '4k' and i.get('debridonly', False) == False]
if quality in ['0', '1']: filter += [i for i in self.sources if i['quality'] == '2k' and i.get('debridonly', False) == True]
if quality in ['0', '1']: filter += [i for i in self.sources if i['quality'] == '2k' and i.get('debridonly', False) == False]
if quality in ['0' ,'1', '2']: filter += [i for i in self.sources if i['quality'] == '1080p' and i.get('debridonly', False) == True]
if quality in ['0', '1', '2']: filter += [i for i in self.sources if i['quality'] == '1080p' and i.get('debridonly', False) == False]
if quality in ['0', '1', '2', '3']: filter += [i for i in self.sources if i['quality'] == 'HD' and i.get('debridonly', False) == True]
if quality in ['0', '1', '2', '3']: filter += [i for i in self.sources if i['quality'] == 'HD' and i.get('debridonly', False) == False]
filter += [i for i in self.sources if i['quality'] == 'SD' and i.get('debridonly', False) == True]
filter += [i for i in self.sources if i['quality'] == 'SD' and i.get('debridonly', False) == False]
if len(filter) < 10: filter += [i for i in self.sources if i['quality'] == 'SCR']
if len(filter) < 10: filter += [i for i in self.sources if i['quality'] == 'CAM']
self.sources = filter
if not captcha == 'true':
filter = [i for i in self.sources if i['source'].lower() in self.hostcapDict and not 'debrid' in i]
self.sources = [i for i in self.sources if not i in filter]
# filter = [i for i in self.sources if i['source'].lower() in self.hostblockDict and not 'debrid' in i]
# self.sources = [i for i in self.sources if not i in filter]
self.sources = self.filter_zips(self.sources)
self.sources = self.sources[:1000]
for i in range(len(self.sources)):
u = self.sources[i]['url']
s = self.sources[i]['scraper'].lower()
s = s.rsplit('.', 1)[0]
p = self.sources[i]['source']
d = self.sources[i].get('debridonly', False)
d = str(d)
# print ("DEBRID STATUS", d)
p = re.sub('v\d*$', '', p)
q = self.sources[i]['quality']
try:
f = (' | '.join(['[I]%s [/I]' % info.strip() for info in self.sources[i]['info'].split('|')]))
except:
f = ''
if d == 'True':
label = '%02d |[I]DEBRID[/I] | [B]%s[/B] | ' % (int(i+1), p)
#if not d == '': label = '%02d | [B]%s[/B] | [B]%s[/B] | ' % (int(i+1), p, d)
else:
label = '%02d | [B]%s[/B] | ' % (int(i+1), p)
if q in ['4K', '2k', '1080p', 'HD']:
label += '%s | %s | [B][I]%s [/I][/B]' % (s, f, q)
elif q == 'SD':
label += '%s | %s | [I]%s [/I]' % (s, f, q)
else:
label += '%s | %s | [I]%s [/I]' % (s, f, q)
label = label.replace('| 0 |', '|').replace(' | [I]0 [/I]', '')
label = label.replace('[I]HEVC [/I]', 'HEVC')
label = re.sub('\[I\]\s+\[/I\]', ' ', label)
label = re.sub('\|\s+\|', '|', label)
label = re.sub('\|(?:\s+|)$', '', label)
self.sources[i]['label'] = label.upper()
return self.sources
def filter_zips(self, sources):
    """Drop sources whose URL contains an archive/image/subtitle extension.

    Google-hosted links are always kept; everything else is kept only when
    no substring from self.blacklist_zips appears in the URL.

    Args:
        sources: list of source dicts, each with a 'url' string.

    Returns:
        list: the surviving source dicts in their original order.
    """
    filtered = []
    for item in sources:
        # Bug fix: the original encoded the url to bytes with
        # .encode('utf-8'), which makes the `str in bytes` substring tests
        # below raise TypeError under Python 3; comparing as str is
        # equivalent for the Python 2 case as well.
        url = item['url'].lower()
        if "google" in url:
            filtered.append(item)
        elif not any(value in url for value in self.blacklist_zips):
            filtered.append(item)
    return filtered
def sourcesResolve(self, item, info=False):
try:
self.url = None
u = url = item['url']
# d = item['debrid'] ;
direct = item['direct']
provider = item['scraper'].lower()
# if not provider.endswith(('_mv', '_tv', '_mv_tv')):
# sourceDict = []
# for package, name, is_pkg in pkgutil.walk_packages(__path__): sourceDict.append((name, is_pkg))
# provider = [i[0] for i in sourceDict if i[1] == False and i[0].startswith(provider + '_')][0]
#source = __import__(provider, globals(), locals(), [], -1).source()
u = url = item["url"]
if url == None: raise Exception()
if any(value in url for value in _shst_regex): u = unshorten._unshorten_shst(url)
# if not d == '':
# url = debrid.resolver(url, d)
if not direct == True:
if not debridstatus == 'true': hmf = urlresolver.HostedMediaFile(url=u, include_disabled=True, include_universal=False)
else: hmf = urlresolver.HostedMediaFile(url=u, include_disabled=True, include_universal=True)
if hmf.valid_url() == True: url = hmf.resolve()
if url == False or url == None: raise Exception()
ext = url.split('?')[0].split('&')[0].split('|')[0].rsplit('.')[-1].replace('/', '').lower()
if ext == 'rar': raise Exception()
try: headers = url.rsplit('|', 1)[1]
except: headers = ''
headers = urllib.quote_plus(headers).replace('%3D', '=') if ' ' in headers else headers
headers = dict(urlparse.parse_qsl(headers))
xbmc.log("url3:" + repr(url), xbmc.LOGNOTICE)
if url.startswith('http') and '.m3u8' in url:
result = client.request(url.split('|')[0], headers=headers, output='geturl', timeout='20')
if result == None: raise Exception()
elif url.startswith('http'):
result = client.request(url.split('|')[0], headers=headers, output='chunk', timeout='30')
if result == None: raise Exception()
else:
raise Exception()
xbmc.log("url4:" + repr(url), xbmc.LOGNOTICE)
self.url = url
xbmc.log("url2:" + repr(url), xbmc.LOGNOTICE)
return url
except:
if info == True: self.errorForSources()
return
def sourcesDialog(self, items):
try:
labels = [i['label'] for i in items]
select = control.selectDialog(labels)
if select == -1: return 'close://'
next = [y for x,y in enumerate(items) if x >= select]
prev = [y for x,y in enumerate(items) if x < select][::-1]
items = [items[select]]
items = [i for i in items+next+prev][:40]
header = control.addonInfo('name')
header2 = header.upper()
progressDialog = control.progressDialog if control.setting('progress.dialog') == '0' else control.progressDialogBG
progressDialog.create(header, '')
progressDialog.update(0)
block = None
for i in range(len(items)):
try:
if items[i]['source'] == block: raise Exception()
w = workers.Thread(self.sourcesResolve, items[i])
w.start()
try:
if progressDialog.iscanceled(): break
progressDialog.update(int((100 / float(len(items))) * i), str(items[i]['label']), str(' '))
except:
progressDialog.update(int((100 / float(len(items))) * i), str(header2), str(items[i]['label']))
m = ''
for x in range(3600):
try:
if xbmc.abortRequested == True: return sys.exit()
if progressDialog.iscanceled(): return progressDialog.close()
except:
pass
k = control.condVisibility('Window.IsActive(virtualkeyboard)')
if k: m += '1'; m = m[-1]
if (w.is_alive() == False or x > 30) and not k: break
k = control.condVisibility('Window.IsActive(yesnoDialog)')
if k: m += '1'; m = m[-1]
if (w.is_alive() == False or x > 30) and not k: break
time.sleep(0.5)
for x in range(30):
try:
if xbmc.abortRequested == True: return sys.exit()
if progressDialog.iscanceled(): return progressDialog.close()
except:
pass
if m == '': break
if w.is_alive() == False: break
time.sleep(0.5)
if w.is_alive() == True: block = items[i]['source']
if self.url == None: raise Exception()
self.selectedSource = items[i]['label']
try: progressDialog.close()
except: pass
control.execute('Dialog.Close(virtualkeyboard)')
control.execute('Dialog.Close(yesnoDialog)')
return self.url
except:
pass
try: progressDialog.close()
except: pass
except:
try: progressDialog.close()
except: pass
def sourcesDialog2(self, items):
try:
labels = [i['label'] for i in items]
select = dialogs_list.select_ext("Select Link", items)
selected_items = select
if not len(selected_items) > 1: return self.errorForSources()
header = control.addonInfo('name')
header2 = header.upper()
progressDialog = control.progressDialog if control.setting('progress.dialog') == '0' else control.progressDialogBG
progressDialog.create(header, '')
progressDialog.update(0)
block = None
for i in range(len(selected_items)):
try:
if selected_items[i]['source'] == block: raise Exception()
w = workers.Thread(self.sourcesResolve, selected_items[i])
w.start()
try:
if progressDialog.iscanceled(): break
progressDialog.update(int((100 / float(len(selected_items))) * i), str(selected_items[i]['label']), str(' '))
except:
progressDialog.update(int((100 / float(len(selected_items))) * i), str(header2), str(selected_items[i]['label']))
m = ''
for x in range(3600):
try:
if xbmc.abortRequested == True: return sys.exit()
if progressDialog.iscanceled(): return progressDialog.close()
except:
pass
k = control.condVisibility('Window.IsActive(virtualkeyboard)')
if k: m += '1'; m = m[-1]
if (w.is_alive() == False or x > 30) and not k: break
k = control.condVisibility('Window.IsActive(yesnoDialog)')
if k: m += '1'; m = m[-1]
if (w.is_alive() == False or x > 30) and not k: break
time.sleep(0.5)
for x in range(30):
try:
if xbmc.abortRequested == True: return sys.exit()
if progressDialog.iscanceled(): return progressDialog.close()
except:
pass
if m == '': break
if w.is_alive() == False: break
time.sleep(0.5)
if w.is_alive() == True: block = selected_items[i]['source']
if self.url == None: raise Exception()
self.selectedSource = selected_items[i]['label']
try: progressDialog.close()
except: pass
control.execute('Dialog.Close(virtualkeyboard)')
control.execute('Dialog.Close(yesnoDialog)')
return self.url
except:
pass
try: progressDialog.close()
except: pass
except:
try: progressDialog.close()
except: pass
def sourcesDirect(self, items):
# filter = [i for i in items if i['source'].lower() in self.hostcapDict and i['debrid'] == '']
# items = [i for i in items if not i in filter]
# filter = [i for i in items if i['source'].lower() in self.hostblockDict and i['debrid'] == '']
items = [i for i in items]
# items = [i for i in items if ('autoplay' in i and i['autoplay'] == True) or not 'autoplay' in i]
if control.setting('autoplay.sd') == 'true':
items = [i for i in items if not i['quality'] in ['4K', '2k', '1080p', 'HD']]
u = None
header = control.addonInfo('name')
header2 = header.upper()
try:
control.sleep(1000)
progressDialog = control.progressDialog if control.setting('progress.dialog') == '0' else control.progressDialogBG
progressDialog.create(header, '')
progressDialog.update(0)
except:
pass
for i in range(len(items)):
try:
if progressDialog.iscanceled(): break
progressDialog.update(int((100 / float(len(items))) * i), str(items[i]['label']), str(' '))
except:
progressDialog.update(int((100 / float(len(items))) * i), str(header2), str(items[i]['label']))
try:
if xbmc.abortRequested == True: return sys.exit()
url = self.sourcesResolve(items[i])
if u == None: u = url
if not url == None: break
except:
pass
try: progressDialog.close()
except: pass
return u
def errorForSources(self):
    # Toast notification telling the user no playable source was found
    # (localized string id 32401); sound disabled.
    control.infoDialog(control.lang(32401).encode('utf-8'), sound=False, icon='INFO')
def getConstants(self):
    """Initialize the window-property names and host lists used throughout
    the resolver pipeline."""
    # Kodi window properties used to hand item/meta JSON between the
    # directory listing and the playback invocation of the add-on.
    self.itemProperty = 'plugin.video.poseidon.container.items'
    self.metaProperty = 'plugin.video.poseidon.container.meta'
    try:
        # Flatten urlresolver's resolver domains into one unique,
        # lower-cased list; wildcard resolvers are excluded.
        self.hostDict = urlresolver.relevant_resolvers(order_matters=True)
        self.hostDict = [i.domains for i in self.hostDict if not '*' in i.domains]
        self.hostDict = [i.lower() for i in reduce(lambda x, y: x+y, self.hostDict)]
        # De-duplicate while preserving first-seen order.
        self.hostDict = [x for y,x in enumerate(self.hostDict) if x not in self.hostDict[:y]]
    except:
        self.hostDict = []
    self.hostBlackList = []
    # Extra premium / direct-file hosts and extensions treated as premium.
    self.hostmyDict = ['uploadrocket.net','userscloud','alfafile','.avi','.mkv','.mov','.mp4','.xvid','.divx','oboom', 'rapidgator', 'rg.to', 'uploaded', 'ul.to', 'filefactory', 'nitroflare', 'turbobit', '1fichier','uptobox', '1fich', 'uploadrocket','uploading','hugefiles', 'uploaded' , 'clicknupload']
    self.hostprDict = self.hostDict + self.hostmyDict
    # Hosts known to demand captchas (filtered out unless debrid is used).
    self.hostcapDict = ['hugefiles.net', 'kingfiles.net', 'openload.io', 'openload.co', 'oload.tv', 'thevideo.me', 'vidup.me', 'streamin.to', 'torba.se']
    # URL substrings that mark never-playable files (archives, images, subs).
    self.blacklist_zips = ['.zip', '.rar', '.jpeg', '.img', '.jpg', '.RAR', '.ZIP', '.png' , '.sub', '.srt']
    self.hostblockDict = []
    self.debridDict = debrid.debridDict()
@staticmethod
def sort_function(item):
    """Return a lexicographically sortable key for a scraper link's quality.

    `item` is a (label, links) pair whose first link either carries a
    'quality' field directly or nests it under 'path'.  Qualities are
    remapped so plain string ordering puts higher resolutions first
    (HDa < HDb < ... < SDc); unrecognized qualities pass through unchanged.
    """
    first_link = item[1][0]
    if "quality" in first_link:
        quality = first_link["quality"]
    else:
        quality = first_link["path"]["quality"]
    # Ordered (predicate, replacement) table mirrors the original chain.
    for matches, replacement in (
            (lambda q: q.startswith("1080"), "HDa"),
            (lambda q: q.startswith("720"), "HDb"),
            (lambda q: q.startswith("560"), "HDc"),
            (lambda q: q == "DVD", "HDd"),
            (lambda q: q == "HD", "HDe"),
            (lambda q: q.startswith("480"), "SDa"),
            (lambda q: q.startswith("360"), "SDb"),
            (lambda q: q.startswith("SD"), "SDc"),
    ):
        if matches(quality):
            return replacement
    return quality
| gpl-2.0 |
Mu5tank05/Walter | plugins/lastfm.py | 18 | 8457 | from datetime import datetime
import requests
from sqlalchemy import Table, Column, PrimaryKeyConstraint, String
from cloudbot import hook
from cloudbot.util import timeformat, web, database
api_url = "http://ws.audioscrobbler.com/2.0/?format=json"
table = Table(
"lastfm",
database.metadata,
Column('nick', String(25)),
Column('acc', String(25)),
PrimaryKeyConstraint('nick')
)
@hook.on_start()
def load_cache(db):
"""
:type db: sqlalchemy.orm.Session
"""
global last_cache
last_cache = []
for row in db.execute(table.select()):
nick = row["nick"]
account = row["acc"]
last_cache.append((nick, account))
def get_account(nick):
    """Return the saved last.fm account for `nick` (case-insensitive lookup
    in last_cache), or None when no account is stored."""
    wanted = nick.lower()
    for cached_nick, account in last_cache:
        if cached_nick == wanted:
            return account
    return None
@hook.command("lastfm", "last", "np", "l", autohelp=False)
def lastfm(text, nick, db, bot, notice):
"""[user] [dontsave] - displays the now playing (or last played) track of LastFM user [user]"""
api_key = bot.config.get("api_keys", {}).get("lastfm")
if not api_key:
return "No last.fm API key set."
# check if the user asked us not to save his details
dontsave = text.endswith(" dontsave")
if dontsave:
user = text[:-9].strip().lower()
else:
user = text
if not user:
user = get_account(nick)
if not user:
notice(lastfm.__doc__)
return
params = {'method': 'user.getrecenttracks',
'api_key': api_key, 'user': user, 'limit': 1}
request = requests.get(api_url, params=params)
if request.status_code != requests.codes.ok:
return "Failed to fetch info ({})".format(request.status_code)
response = request.json()
if 'error' in response:
return "Last.FM Error: {}.".format(response["message"])
if "track" not in response["recenttracks"] or len(response["recenttracks"]["track"]) == 0:
return 'No recent tracks for user "{}" found.'.format(user)
tracks = response["recenttracks"]["track"]
if type(tracks) == list:
# if the user is listening to something, the tracks entry is a list
# the first item is the current track
track = tracks[0]
status = 'is listening to'
ending = '.'
elif type(tracks) == dict:
# otherwise, they aren't listening to anything right now, and
# the tracks entry is a dict representing the most recent track
track = tracks
status = 'last listened to'
# lets see how long ago they listened to it
time_listened = datetime.fromtimestamp(int(track["date"]["uts"]))
time_since = timeformat.time_since(time_listened)
ending = ' ({} ago)'.format(time_since)
else:
return "error: could not parse track listing"
title = track["name"]
album = track["album"]["#text"]
artist = track["artist"]["#text"]
url = web.try_shorten(track["url"])
out = '{} {} "{}"'.format(user, status, title)
if artist:
out += " by \x02{}\x0f".format(artist)
if album:
out += " from the album \x02{}\x0f".format(album)
if url:
out += " {}".format(url)
# append ending based on what type it was
out += ending
if text and not dontsave:
db.execute("insert or replace into lastfm(nick, acc) values (:nick, :account)",
{'nick': nick.lower(), 'account': user})
db.commit()
load_cache(db)
return out
@hook.command("lastfmcompare", "compare", "lc")
def lastfmcompare(text, nick, bot,):
"""[user] ([user] optional) - displays the now playing (or last played) track of LastFM user [user]"""
api_key = bot.config.get("api_keys", {}).get("lastfm")
if not api_key:
return "No last.fm API key set."
if not text:
return "please specify a lastfm username to compare"
try:
user1, user2 = text.split()
except:
user2 = text
user1 = nick
user2_check = get_account(user2)
if user2_check:
user2 = user2_check
user1_check = get_account(user1)
if user1_check:
user1 = user1_check
params = {
'method': 'tasteometer.compare',
'api_key': api_key,
'type1': 'user',
'value1': user1,
'type2': 'user',
'value2': user2
}
request = requests.get(api_url, params=params)
if request.status_code != requests.codes.ok:
return "Failed to fetch info ({})".format(request.status_code)
data = request.json()
if 'error' in data:
return "Error: {}.".format(data["message"])
score = float(data["comparison"]["result"]["score"])
score = float("{:.3f}".format(score * 100))
if score == 0:
return "{} and {} have no common listening history.".format(user2, user1)
level = "Super" if score > 95 else "Very High" if score > 80 else "High" if score > 60 else \
"Medium" if score > 40 else "Low" if score > 10 else "Very Low"
# I'm not even going to try to rewrite this line
artists = [f["name"] for f in data["comparison"]["result"]["artists"]["artist"]] if \
type(data["comparison"]["result"]["artists"]["artist"]) == list else \
[data["comparison"]["result"]["artists"]["artist"]["name"]] if "artist" \
in data["comparison"]["result"]["artists"] else ""
artist_string = "\x02In Common:\x02 " + \
", ".join(artists) if artists else ""
return "Musical compatibility between \x02{}\x02 and \x02{}\x02: {} (\x02{}%\x02) {}".format(user1, user2, level,
score, artist_string)
@hook.command("ltop", "ltt", autohelp=False)
def toptrack(text, nick, bot):
"""Grabs a list of the top tracks for a last.fm username"""
api_key = bot.config.get("api_keys", {}).get("lastfm")
if not api_key:
return "error: no api key set"
if text:
username = get_account(text)
if not username:
username = text
else:
username = get_account(nick)
if not username:
return "No last.fm username specified and no last.fm username is set in the database."
params = {
'api_key': api_key,
'method': 'user.gettoptracks',
'user': username,
'limit': 5
}
request = requests.get(api_url, params=params)
if request.status_code != requests.codes.ok:
return "Failed to fetch info ({})".format(request.status_code)
data = request.json()
if 'error' in data:
return "Error: {}.".format(data["message"])
out = "{}'s favorite songs: ".format(username)
for r in range(5):
track_name = data["toptracks"]["track"][r]["name"]
artist_name = data["toptracks"]["track"][r]["artist"]["name"]
play_count = data["toptracks"]["track"][r]["playcount"]
out = out + "{} by {} listened to {} times. ".format(track_name, artist_name, play_count)
return out
@hook.command("lta", "topartist", autohelp=False)
def topartists(text, nick, bot):
    """Grabs a list of the top artists for a last.fm username. You can set your lastfm username with .l username"""
    api_key = bot.config.get("api_keys", {}).get("lastfm")
    if not api_key:
        return "error: no api key set"

    # Resolve the target account: explicit argument first (falling back to
    # the literal text when no account is registered), else the caller's nick.
    if text:
        username = get_account(text) or text
    else:
        username = get_account(nick)
    if not username:
        return "No last.fm username specified and no last.fm username is set in the database."

    params = {
        'api_key': api_key,
        'method': 'user.gettopartists',
        'user': username,
        'limit': 5
    }
    request = requests.get(api_url, params=params)

    if request.status_code != requests.codes.ok:
        return "Failed to fetch info ({})".format(request.status_code)

    data = request.json()
    if 'error' in data:
        return "Error: {}.".format(data["message"])

    # Iterate over the artists actually returned; the previous fixed
    # range(5) loop raised IndexError for accounts with fewer than
    # five listened artists.
    out = "{}'s favorite artists: ".format(username)
    for artist in data["topartists"]["artist"][:5]:
        out += "{} listened to {} times. ".format(
            artist["name"], artist["playcount"])
    return out
| gpl-3.0 |
pmquang/FuzzLabs | tools/mp3_embed_image.py | 7 | 4620 | #!/usr/bin/python
# ====================================================================================
# apt-get install python-mutagen
# ====================================================================================
# Mutagen has some issues with the save() function, or I just could not figure out
# how to do "save as" properly. Because of this:
# 1. a backup copy of the Base MP3 File is created.
# 2. mutagen will update the Base MP3 File
# 3. this tool will copy the updated file to the destination
# 4. the Base MP3 File will be overwritten, with the original, backup one
# 5. repeat from 1)
# This tool ONLY works with MP3 files which has an APIC tag in the ID3 section.
# So, if you want to use your custom MP3 use Mp3tag for example to embed a default
# image. After, you can use this tool.
import os
import sys
import shutil
import argparse
from glob import glob
from mutagen.mp3 import MP3
# ------------------------------------------------------------------------------------
#
# ------------------------------------------------------------------------------------
def process(src_dir, dest_dir, base_file, image_type, ext):
    """Embed every image found under src_dir into a copy of base_file.

    For each image the (previously backed-up) base MP3 is restored, its
    existing APIC (cover art) frame is replaced with the image data via
    mutagen, and the result is copied below dest_dir into sub-directories
    of at most 1000 files each.
    """
    # Restore the pristine base MP3; mutagen's save() mutates it in place
    # (see the file header for why "save as" is not used).
    shutil.copy2(base_file + ".orig", base_file)
    print "Status:"
    print " %-40s%s" % ("Source directory:", src_dir)
    print " %-40s%s" % ("Destination directory:", dest_dir)
    print " %-40s%s" % ("Base MP3:", base_file)
    print " %-40s%s" % ("Image type:", image_type)
    print " %-40s%s" % ("Image extension:", ext)

    # Recursively collect every *.<ext> file under src_dir.
    files = [y for x in os.walk(src_dir) for y in glob(os.path.join(x[0], '*.' + ext))]
    counter = 0
    for image_file in files:
        print " %-40s%s" % ("Image:", image_file)
        # Bucket output files: at most 1000 mutations per sub-directory.
        dd = dest_dir + "/" + str(counter / 1000)
        df = dd + "/test." + str(counter) + ".mp3"
        print " %-40s%s" % ("File destination:", df)
        if not os.path.exists(dd):
            try:
                os.makedirs(dd)
            except Exception, ex:
                print "Could not create destination directory: %s" % str(ex)
                sys.exit(6)
        image_data = None
        with open(image_file, 'r') as f:
            image_data = f.read()
        if image_data == None:
            print "Could not read image data, exiting..."
            sys.exit(7)
        print " %-40s%s" % ("Image size:", len(image_data))
        # Replace the existing APIC frame with the new image data.
        # NOTE(review): this assumes the base MP3 already carries an 'APIC:'
        # tag, as stated in the file header -- a KeyError is raised otherwise.
        audio = MP3(base_file)
        picturetag = audio.tags['APIC:']
        picturetag.desc = "Mutation #%d" % counter
        picturetag.mime = image_type
        picturetag.data = image_data
        audio.tags['APIC:'] = picturetag
        audio.save()
        shutil.copy2(base_file, df)
        counter += 1
# ------------------------------------------------------------------------------------
#
# ------------------------------------------------------------------------------------
def validate_path(t_path, is_source = True):
    """Return True when t_path is a usable directory.

    Source paths must already exist; destination paths (is_source=False)
    are created on demand and re-checked afterwards.
    """
    if os.path.isdir(t_path):
        return True
    if is_source:
        return False
    os.makedirs(t_path)
    return os.path.isdir(t_path)
# ------------------------------------------------------------------------------------
# Main
# ------------------------------------------------------------------------------------
# Build the command-line interface, validate every input, then process.
parser = argparse.ArgumentParser(description='Embed image (JPEG or PNG) into MP3 as cover image')
parser.add_argument('-sd', metavar='<path>', required = True,
                    help='Source directory containing the image files')
parser.add_argument('-dd', metavar='<path>', required = True,
                    help='Destination directory to store the MP3 files to')
parser.add_argument('-it', metavar='<type>', required = True,
                    help='Media type (jpeg/png)')
parser.add_argument('-bf', metavar='<path>', required = True,
                    help='MP3 file to use as base')
args = parser.parse_args()

# Each failure mode exits with a distinct status code for scriptability.
if args.sd == None or not validate_path(args.sd):
    print "Source directory does not exist, exiting."
    sys.exit(1)
if args.dd == None:
    print "No destination directory specified, exiting."
    sys.exit(2)
if not validate_path(args.dd, False):
    print "Could not set up destination directory, exiting."
    sys.exit(3)
if not os.path.isfile(args.bf):
    print "Could not find base MP3 file, exiting."
    sys.exit(4)
# Keep a pristine backup of the base MP3; process() restores it per image.
if not os.path.isfile(args.bf + ".orig"):
    shutil.copy2(args.bf, args.bf + ".orig")
if args.it != "jpeg" and args.it != "png":
    print "Unsupported image type, exiting."
    sys.exit(5)
# Map the MIME subtype to the file extension used for the glob.
extension = ""
if args.it == "jpeg": extension = "jpg"
if args.it == "png": extension = "png"
process(args.sd, args.dd, args.bf, u"image/" + args.it, extension)
| gpl-2.0 |
dmonner/tweater | py/nltk/cluster/kmeans.py | 4 | 7550 | # Natural Language Toolkit: K-Means Clusterer
#
# Copyright (C) 2001-2011 NLTK Project
# Author: Trevor Cohn <tacohn@cs.mu.oz.au>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
import numpy
import random
from api import *
from util import *
class KMeansClusterer(VectorSpaceClusterer):
"""
The K-means clusterer starts with k arbitrary chosen means then allocates
each vector to the cluster with the closest mean. It then recalculates the
means of each cluster as the centroid of the vectors in the cluster. This
process repeats until the cluster memberships stabilise. This is a
hill-climbing algorithm which may converge to a local maximum. Hence the
clustering is often repeated with random initial means and the most
commonly occuring output means are chosen.
"""
def __init__(self, num_means, distance, repeats=1,
conv_test=1e-6, initial_means=None,
normalise=False, svd_dimensions=None,
rng=None):
"""
@param num_means: the number of means to use (may use fewer)
@type num_means: int
@param distance: measure of distance between two vectors
@type distance: function taking two vectors and returing a float
@param repeats: number of randomised clustering trials to use
@type repeats: int
@param conv_test: maximum variation in mean differences before
deemed convergent
@type conv_test: number
@param initial_means: set of k initial means
@type initial_means: sequence of vectors
@param normalise: should vectors be normalised to length 1
@type normalise: boolean
@param svd_dimensions: number of dimensions to use in reducing vector
dimensionsionality with SVD
@type svd_dimensions: int
@param rng: random number generator (or None)
@type rng: Random
"""
VectorSpaceClusterer.__init__(self, normalise, svd_dimensions)
self._num_means = num_means
self._distance = distance
self._max_difference = conv_test
assert not initial_means or len(initial_means) == num_means
self._means = initial_means
assert repeats >= 1
assert not (initial_means and repeats > 1)
self._repeats = repeats
if rng: self._rng = rng
else: self._rng = random.Random()
def cluster_vectorspace(self, vectors, trace=False):
if self._means and self._repeats > 1:
print 'Warning: means will be discarded for subsequent trials'
meanss = []
for trial in range(self._repeats):
if trace: print 'k-means trial', trial
if not self._means or trial > 1:
self._means = self._rng.sample(vectors, self._num_means)
self._cluster_vectorspace(vectors, trace)
meanss.append(self._means)
if len(meanss) > 1:
# sort the means first (so that different cluster numbering won't
# effect the distance comparison)
for means in meanss:
means.sort(cmp = _vector_compare)
# find the set of means that's minimally different from the others
min_difference = min_means = None
for i in range(len(meanss)):
d = 0
for j in range(len(meanss)):
if i != j:
d += self._sum_distances(meanss[i], meanss[j])
if min_difference == None or d < min_difference:
min_difference, min_means = d, meanss[i]
# use the best means
self._means = min_means
def _cluster_vectorspace(self, vectors, trace=False):
if self._num_means < len(vectors):
# perform k-means clustering
converged = False
while not converged:
# assign the tokens to clusters based on minimum distance to
# the cluster means
clusters = [[] for m in range(self._num_means)]
for vector in vectors:
index = self.classify_vectorspace(vector)
clusters[index].append(vector)
if trace: print 'iteration'
#for i in range(self._num_means):
#print ' mean', i, 'allocated', len(clusters[i]), 'vectors'
# recalculate cluster means by computing the centroid of each cluster
new_means = map(self._centroid, clusters)
# measure the degree of change from the previous step for convergence
difference = self._sum_distances(self._means, new_means)
if difference < self._max_difference:
converged = True
# remember the new means
self._means = new_means
def classify_vectorspace(self, vector):
# finds the closest cluster centroid
# returns that cluster's index
best_distance = best_index = None
for index in range(len(self._means)):
mean = self._means[index]
dist = self._distance(vector, mean)
if best_distance == None or dist < best_distance:
best_index, best_distance = index, dist
return best_index
def num_clusters(self):
if self._means:
return len(self._means)
else:
return self._num_means
def means(self):
"""
The means used for clustering.
"""
return self._means
def _sum_distances(self, vectors1, vectors2):
difference = 0.0
for u, v in zip(vectors1, vectors2):
difference += self._distance(u, v)
return difference
def _centroid(self, cluster):
assert len(cluster) > 0
centroid = copy.copy(cluster[0])
for vector in cluster[1:]:
centroid += vector
return centroid / float(len(cluster))
def __repr__(self):
return '<KMeansClusterer means=%s repeats=%d>' % \
(self._means, self._repeats)
def _vector_compare(x, y):
xs, ys = sum(x), sum(y)
if xs < ys: return -1
elif xs > ys: return 1
else: return 0
#################################################################################
def demo():
    """Exercise the k-means clusterer on two small 2-D examples."""
    # example from figure 14.9, page 517, Manning and Schutze
    from nltk import cluster

    vectors = [numpy.array(f) for f in [[2, 1], [1, 3], [4, 7], [6, 7]]]
    means = [[4, 3], [5, 5]]

    # First run: deterministic, starting from the fixed means above.
    clusterer = cluster.KMeansClusterer(2, euclidean_distance, initial_means=means)
    clusters = clusterer.cluster(vectors, True, trace=True)

    print 'Clustered:', vectors
    print 'As:', clusters
    print 'Means:', clusterer.means()
    print

    vectors = [numpy.array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0], [2, 3], [3, 1]]]

    # test k-means using the euclidean distance metric, 2 means and repeat
    # clustering 10 times with random seeds
    clusterer = cluster.KMeansClusterer(2, euclidean_distance, repeats=10)
    clusters = clusterer.cluster(vectors, True)
    print 'Clustered:', vectors
    print 'As:', clusters
    print 'Means:', clusterer.means()
    print

    # classify a new vector
    vector = numpy.array([3, 3])
    print 'classify(%s):' % vector,
    print clusterer.classify(vector)
    print
# Run the demonstration when this module is executed as a script.
if __name__ == '__main__':
    demo()
| gpl-3.0 |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.0/Lib/binhex.py | 2 | 13890 | """Macintosh binhex compression/decompression.
easy interface:
binhex(inputfilename, outputfilename)
hexbin(inputfilename, outputfilename)
"""
#
# Jack Jansen, CWI, August 1995.
#
# The module is supposed to be as compatible as possible. Especially the
# easy interface should work "as expected" on any platform.
# XXXX Note: currently, textfiles appear in mac-form on all platforms.
# We seem to lack a simple character-translate in python.
# (we should probably use ISO-Latin-1 on all but the mac platform).
# XXXX The simple routines are too simple: they expect to hold the complete
# files in-core. Should be fixed.
# XXXX It would be nice to handle AppleDouble format on unix
# (for servers serving macs).
# XXXX I don't understand what happens when you get 0x90 times the same byte on
# input. The resulting code (xx 90 90) would appear to be interpreted as an
# escaped *value* of 0x90. All coders I've seen appear to ignore this nicety...
#
import io
import os
import sys
import struct
import binascii
__all__ = ["binhex","hexbin","Error"]
class Error(Exception):
    """Signals a malformed binhex stream or misuse of the codec API."""
# States (what have we written)
[_DID_HEADER, _DID_DATA, _DID_RSRC] = range(3)
# Various constants
REASONABLY_LARGE = 32768 # Minimal amount we pass the rle-coder
LINELEN = 64
RUNCHAR = b"\x90"
#
# This code is no longer byte-order dependent
class FInfo:
    """Macintosh Finder information: file type, creator code and flags."""

    def __init__(self):
        # '????' is the conventional "unknown" four-character code.
        self.Type, self.Creator, self.Flags = '????', '????', 0
def getfileinfo(name):
    """Return (basename, FInfo, data-fork-size, resource-fork-size).

    On non-Mac platforms there is no resource fork, so its size is 0.
    """
    finfo = FInfo()
    fp = io.open(name, 'rb')
    # Quick check for textfile: a NUL byte in the first 512 bytes -> binary.
    data = fp.read(512)
    if 0 not in data:
        finfo.Type = 'TEXT'
    fp.seek(0, 2)
    dsize = fp.tell()
    fp.close()
    dir, file = os.path.split(name)
    # ':' is the classic Mac path separator; replace its first occurrence.
    file = file.replace(':', '-', 1)
    return file, finfo, dsize, 0
class openrsrc:
    # Dummy resource-fork file object for platforms without resource forks:
    # reads always yield nothing and writes are silently discarded.
    def __init__(self, *args):
        pass

    def read(self, *args):
        return b''

    def write(self, *args):
        pass

    def close(self):
        pass
class _Hqxcoderengine:
    """Write data to the coder in 3-byte chunks"""

    def __init__(self, ofp):
        self.ofp = ofp
        self.data = b''        # raw bytes not yet encoded (kept < 3 long)
        self.hqxdata = b''     # encoded bytes not yet written out
        self.linelen = LINELEN - 1   # first line is shorter: the ':' prefix

    def write(self, data):
        self.data = self.data + data
        datalen = len(self.data)
        # b2a_hqx needs whole 3-byte groups; keep any remainder for later.
        todo = (datalen // 3) * 3
        data = self.data[:todo]
        self.data = self.data[todo:]
        if not data:
            return
        self.hqxdata = self.hqxdata + binascii.b2a_hqx(data)
        self._flush(0)

    def _flush(self, force):
        # Emit as many complete output lines as possible; when 'force' is
        # set, also write the tail and the closing ':' terminator.
        first = 0
        while first <= len(self.hqxdata) - self.linelen:
            last = first + self.linelen
            self.ofp.write(self.hqxdata[first:last] + b'\n')
            self.linelen = LINELEN
            first = last
        self.hqxdata = self.hqxdata[first:]
        if force:
            self.ofp.write(self.hqxdata + b':\n')

    def close(self):
        # Encode any buffered remainder, flush everything and close.
        if self.data:
            self.hqxdata = self.hqxdata + binascii.b2a_hqx(self.data)
        self._flush(1)
        self.ofp.close()
        del self.ofp
class _Rlecoderengine:
    """Write data to the RLE-coder in suitably large chunks"""

    def __init__(self, ofp):
        self.ofp = ofp
        self.data = b''   # bytes buffered until a large-enough block exists

    def write(self, data):
        self.data = self.data + data
        # Buffer until reasonably large: RLE works better on big inputs.
        if len(self.data) < REASONABLY_LARGE:
            return
        rledata = binascii.rlecode_hqx(self.data)
        self.ofp.write(rledata)
        self.data = b''

    def close(self):
        # Flush the buffered remainder before closing the downstream coder.
        if self.data:
            rledata = binascii.rlecode_hqx(self.data)
            self.ofp.write(rledata)
        self.ofp.close()
        del self.ofp
class BinHex:
    """Incremental binhex4 encoder.

    Construct with ((name, finfo, dlen, rlen), output), then feed exactly
    *dlen* bytes via write(), call close_data(), feed *rlen* bytes via
    write_rsrc() and finally close(). *output* may be a filename or a
    binary file object.
    """

    def __init__(self, name_finfo_dlen_rlen, ofp):
        name, finfo, dlen, rlen = name_finfo_dlen_rlen
        if isinstance(ofp, str):
            ofname = ofp
            ofp = io.open(ofname, 'wb')
            if os.name == 'mac':
                fss = FSSpec(ofname)
                fss.SetCreatorType('BnHq', 'TEXT')
        ofp.write(b'(This file must be converted with BinHex 4.0)\r\r:')
        # Stack the coders: run-length encode first, then hqx to ASCII.
        hqxer = _Hqxcoderengine(ofp)
        self.ofp = _Rlecoderengine(hqxer)
        self.crc = 0
        if finfo is None:
            finfo = FInfo()
        self.dlen = dlen
        self.rlen = rlen
        self._writeinfo(name, finfo)
        self.state = _DID_HEADER

    def _writeinfo(self, name, finfo):
        # Header layout: length-prefixed name, NUL, type(4)+creator(4),
        # flags(2), data/resource fork lengths (4+4), then the header CRC.
        nl = len(name)
        if nl > 63:
            raise Error('Filename too long')
        d = bytes([nl]) + name.encode("latin-1") + b'\0'
        tp, cr = finfo.Type, finfo.Creator
        if isinstance(tp, str):
            tp = tp.encode("latin-1")
        if isinstance(cr, str):
            cr = cr.encode("latin-1")
        d2 = tp + cr

        # Force all structs to be packed with big-endian
        d3 = struct.pack('>h', finfo.Flags)
        d4 = struct.pack('>ii', self.dlen, self.rlen)
        info = d + d2 + d3 + d4
        self._write(info)
        self._writecrc()

    def _write(self, data):
        # Every payload byte is folded into the running section CRC.
        self.crc = binascii.crc_hqx(data, self.crc)
        self.ofp.write(data)

    def _writecrc(self):
        # XXXX Should this be here??
        # self.crc = binascii.crc_hqx('\0\0', self.crc)
        # Pick signed/unsigned format to match the sign of the CRC value.
        if self.crc < 0:
            fmt = '>h'
        else:
            fmt = '>H'
        self.ofp.write(struct.pack(fmt, self.crc))
        self.crc = 0

    def write(self, data):
        if self.state != _DID_HEADER:
            raise Error('Writing data at the wrong time')
        self.dlen = self.dlen - len(data)
        self._write(data)

    def close_data(self):
        if self.dlen != 0:
            # BUGFIX: the message previously reported self.rlen although the
            # mismatch being diagnosed is the remaining *data*-fork length.
            raise Error('Incorrect data size, diff=%r' % (self.dlen,))
        self._writecrc()
        self.state = _DID_DATA

    def write_rsrc(self, data):
        if self.state < _DID_DATA:
            self.close_data()
        if self.state != _DID_DATA:
            raise Error('Writing resource data at the wrong time')
        self.rlen = self.rlen - len(data)
        self._write(data)

    def close(self):
        if self.state < _DID_DATA:
            self.close_data()
        if self.state != _DID_DATA:
            raise Error('Close at the wrong time')
        if self.rlen != 0:
            raise Error("Incorrect resource-datasize, diff=%r" % (self.rlen,))
        self._writecrc()
        self.ofp.close()
        self.state = None
        del self.ofp
def binhex(inp, out):
    """binhex(infilename, outfilename): create binhex-encoded copy of a file"""
    finfo = getfileinfo(inp)
    ofp = BinHex(finfo, out)

    # Stream the data fork through the encoder in 128 KB chunks.
    ifp = io.open(inp, 'rb')
    # XXXX Do textfile translation on non-mac systems
    while True:
        d = ifp.read(128000)
        if not d: break
        ofp.write(d)
    ofp.close_data()
    ifp.close()

    # Then the resource fork (a no-op stub on non-Mac platforms).
    ifp = openrsrc(inp, 'rb')
    while True:
        d = ifp.read(128000)
        if not d: break
        ofp.write_rsrc(d)
    ofp.close()
    ifp.close()
class _Hqxdecoderengine:
    """Read data via the decoder in 4-byte chunks"""

    def __init__(self, ifp):
        self.ifp = ifp
        self.eof = 0   # set once a2b_hqx reports the terminating ':'

    def read(self, totalwtd):
        """Read at least wtd bytes (or until EOF)"""
        decdata = b''
        wtd = totalwtd
        #
        # The loop here is convoluted, since we don't really know how
        # much to decode: there may be newlines in the incoming data.
        while wtd > 0:
            if self.eof: return decdata
            # Four encoded characters yield three decoded bytes.
            wtd = ((wtd + 2) // 3) * 4
            data = self.ifp.read(wtd)
            #
            # Next problem: there may not be a complete number of
            # bytes in what we pass to a2b. Solve by yet another
            # loop.
            #
            while True:
                try:
                    decdatacur, self.eof = binascii.a2b_hqx(data)
                    break
                except binascii.Incomplete:
                    pass
                # Grow the buffer one byte at a time until a2b_hqx is happy.
                newdata = self.ifp.read(1)
                if not newdata:
                    raise Error('Premature EOF on binhex file')
                data = data + newdata
            decdata = decdata + decdatacur
            wtd = totalwtd - len(decdata)
            if not decdata and not self.eof:
                raise Error('Premature EOF on binhex file')
        return decdata

    def close(self):
        self.ifp.close()
class _Rledecoderengine:
"""Read data via the RLE-coder"""
def __init__(self, ifp):
self.ifp = ifp
self.pre_buffer = b''
self.post_buffer = b''
self.eof = 0
def read(self, wtd):
if wtd > len(self.post_buffer):
self._fill(wtd - len(self.post_buffer))
rv = self.post_buffer[:wtd]
self.post_buffer = self.post_buffer[wtd:]
return rv
def _fill(self, wtd):
self.pre_buffer = self.pre_buffer + self.ifp.read(wtd + 4)
if self.ifp.eof:
self.post_buffer = self.post_buffer + \
binascii.rledecode_hqx(self.pre_buffer)
self.pre_buffer = b''
return
#
# Obfuscated code ahead. We have to take care that we don't
# end up with an orphaned RUNCHAR later on. So, we keep a couple
# of bytes in the buffer, depending on what the end of
# the buffer looks like:
# '\220\0\220' - Keep 3 bytes: repeated \220 (escaped as \220\0)
# '?\220' - Keep 2 bytes: repeated something-else
# '\220\0' - Escaped \220: Keep 2 bytes.
# '?\220?' - Complete repeat sequence: decode all
# otherwise: keep 1 byte.
#
mark = len(self.pre_buffer)
if self.pre_buffer[-3:] == RUNCHAR + b'\0' + RUNCHAR:
mark = mark - 3
elif self.pre_buffer[-1] == RUNCHAR:
mark = mark - 2
elif self.pre_buffer[-2:] == RUNCHAR + b'\0':
mark = mark - 2
elif self.pre_buffer[-2] == RUNCHAR:
pass # Decode all
else:
mark = mark - 1
self.post_buffer = self.post_buffer + \
binascii.rledecode_hqx(self.pre_buffer[:mark])
self.pre_buffer = self.pre_buffer[mark:]
def close(self):
self.ifp.close()
class HexBin:
    """Incremental binhex4 decoder.

    Parses the header on construction, then serves the data fork via
    read() and the resource fork via read_rsrc(), verifying the CRC of
    every section.
    """

    def __init__(self, ifp):
        if isinstance(ifp, str):
            ifp = io.open(ifp, 'rb')
        #
        # Find initial colon.
        #
        while True:
            ch = ifp.read(1)
            if not ch:
                raise Error("No binhex data found")
            # Cater for \r\n terminated lines (which show up as \n\r, hence
            # all lines start with \r)
            if ch == b'\r':
                continue
            if ch == b':':
                break

        # Stack the decoders: hqx ASCII first, then run-length expansion.
        hqxifp = _Hqxdecoderengine(ifp)
        self.ifp = _Rledecoderengine(hqxifp)
        self.crc = 0
        self._readheader()

    def _read(self, len):
        # Read while folding every byte into the running section CRC.
        data = self.ifp.read(len)
        self.crc = binascii.crc_hqx(data, self.crc)
        return data

    def _checkcrc(self):
        filecrc = struct.unpack('>h', self.ifp.read(2))[0] & 0xffff
        #self.crc = binascii.crc_hqx('\0\0', self.crc)
        # XXXX Is this needed??
        self.crc = self.crc & 0xffff
        if filecrc != self.crc:
            raise Error('CRC error, computed %x, read %x'
                        % (self.crc, filecrc))
        self.crc = 0

    def _readheader(self):
        # Header layout: length-prefixed name, NUL, type(4), creator(4),
        # flags(2), data-fork length(4), resource-fork length(4), CRC(2).
        len = self._read(1)
        fname = self._read(ord(len))
        rest = self._read(1 + 4 + 4 + 2 + 4 + 4)
        self._checkcrc()

        type = rest[1:5]
        creator = rest[5:9]
        flags = struct.unpack('>h', rest[9:11])[0]
        self.dlen = struct.unpack('>l', rest[11:15])[0]
        self.rlen = struct.unpack('>l', rest[15:19])[0]

        self.FName = fname
        self.FInfo = FInfo()
        self.FInfo.Creator = creator
        self.FInfo.Type = type
        self.FInfo.Flags = flags

        self.state = _DID_HEADER

    def read(self, *n):
        """Read up to n data-fork bytes; all of them when n is omitted."""
        if self.state != _DID_HEADER:
            raise Error('Read data at wrong time')
        if n:
            n = n[0]
            n = min(n, self.dlen)
        else:
            n = self.dlen
        rv = b''
        while len(rv) < n:
            rv = rv + self._read(n-len(rv))
        self.dlen = self.dlen - n
        return rv

    def close_data(self):
        # Skip any unread data-fork bytes and verify the data CRC.
        if self.state != _DID_HEADER:
            raise Error('close_data at wrong time')
        if self.dlen:
            dummy = self._read(self.dlen)
        self._checkcrc()
        self.state = _DID_DATA

    def read_rsrc(self, *n):
        """Read up to n resource-fork bytes; all of them when n is omitted."""
        if self.state == _DID_HEADER:
            self.close_data()
        if self.state != _DID_DATA:
            raise Error('Read resource data at wrong time')
        if n:
            n = n[0]
            n = min(n, self.rlen)
        else:
            n = self.rlen
        self.rlen = self.rlen - n
        return self._read(n)

    def close(self):
        # Drain the resource fork, verify its CRC and close the stream.
        if self.rlen:
            dummy = self.read_rsrc(self.rlen)
        self._checkcrc()
        self.state = _DID_RSRC
        self.ifp.close()
def hexbin(inp, out):
    """hexbin(infilename, outfilename) - Decode binhexed file"""
    ifp = HexBin(inp)
    finfo = ifp.FInfo
    # Fall back to the file name stored in the binhex header.
    if not out:
        out = ifp.FName
    if os.name == 'mac':
        ofss = FSSpec(out)
        out = ofss.as_pathname()

    # Stream the data fork out in 128 KB chunks.
    ofp = io.open(out, 'wb')
    # XXXX Do translation on non-mac systems
    while True:
        d = ifp.read(128000)
        if not d: break
        ofp.write(d)
    ofp.close()
    ifp.close_data()

    # Only create a resource-fork file if there is resource data.
    d = ifp.read_rsrc(128000)
    if d:
        ofp = openrsrc(out, 'wb')
        ofp.write(d)
        while True:
            d = ifp.read_rsrc(128000)
            if not d: break
            ofp.write(d)
        ofp.close()

    if os.name == 'mac':
        # Restore the original Finder metadata on Mac.
        nfinfo = ofss.GetFInfo()
        nfinfo.Creator = finfo.Creator
        nfinfo.Type = finfo.Type
        nfinfo.Flags = finfo.Flags
        ofss.SetFInfo(nfinfo)

    ifp.close()
| mit |
sachitanandpandey/sos_spandey | sos/plugins/mongodb.py | 5 | 1538 | # Copyright (C) 2014 Red Hat, Inc., Bryn M. Reeves <bmr@redhat.com>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
class MongoDb(Plugin, DebianPlugin, UbuntuPlugin):
    """MongoDB document database
    """

    plugin_name = 'mongodb'
    profiles = ('services',)

    # The plugin activates when the server package or its config exists.
    packages = ('mongodb-server',)
    files = ('/etc/mongodb.conf',)

    def setup(self):
        # Collect the daemon configuration and its log file.
        self.add_copy_spec([
            "/etc/mongodb.conf",
            "/var/log/mongodb/mongodb.log"
        ])

    def postproc(self):
        # Scrub the MMS agent token so credentials never leave the host.
        self.do_file_sub(
            "/etc/mongodb.conf",
            r"(mms-token\s*=\s*.*)",
            r"mms-token = ********"
        )
class RedHatMongoDb(MongoDb, RedHatPlugin):
    # Red Hat additionally ships service defaults under /etc/sysconfig.
    def setup(self):
        super(RedHatMongoDb, self).setup()
        self.add_copy_spec("/etc/sysconfig/mongodb")
# vim: et ts=4 sw=4
| gpl-2.0 |
Bysmyyr/chromium-crosswalk | tools/telemetry/third_party/gsutilz/third_party/boto/boto/ec2/volumestatus.py | 181 | 6329 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.ec2.instancestatus import Status, Details
class Event(object):
    """
    A status event for an instance.

    :ivar type: The type of the event.
    :ivar id: The ID of the event.
    :ivar description: A string describing the reason for the event.
    :ivar not_before: A datestring describing the earliest time for
        the event.
    :ivar not_after: A datestring describing the latest time for
        the event.
    """

    def __init__(self, type=None, id=None, description=None,
                 not_before=None, not_after=None):
        self.type = type
        self.id = id
        self.description = description
        self.not_before = not_before
        self.not_after = not_after

    def __repr__(self):
        return 'Event:%s' % self.type

    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        # Translate the EC2 XML tag to our attribute name; any tag not in
        # the map is stored verbatim on the instance.
        attr_for = {
            'eventType': 'type',
            'eventId': 'id',
            'description': 'description',
            'notBefore': 'not_before',
            'notAfter': 'not_after',
        }
        setattr(self, attr_for.get(name, name), value)
class EventSet(list):
    """Collection of Event objects parsed from an eventsSet XML element."""

    def startElement(self, name, attrs, connection):
        # Only <item> elements open a new Event child parser.
        if name != 'item':
            return None
        event = Event()
        self.append(event)
        return event

    def endElement(self, name, value, connection):
        setattr(self, name, value)
class Action(object):
    """
    An action for an instance.

    :ivar code: The code for the type of the action.
    :ivar id: The ID of the event.
    :ivar type: The type of the event.
    :ivar description: A description of the action.
    """

    def __init__(self, code=None, id=None, description=None, type=None):
        self.code = code
        self.id = id
        self.type = type
        self.description = description

    def __repr__(self):
        return 'Action:%s' % self.code

    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        # Translate the EC2 XML tag to our attribute name; unknown tags
        # are stored verbatim on the instance.
        attr_for = {
            'eventType': 'type',
            'eventId': 'id',
            'description': 'description',
            'code': 'code',
        }
        setattr(self, attr_for.get(name, name), value)
class ActionSet(list):
    """Collection of Action objects parsed from an actionsSet XML element."""

    def startElement(self, name, attrs, connection):
        # Only <item> elements open a new Action child parser.
        if name != 'item':
            return None
        action = Action()
        self.append(action)
        return action

    def endElement(self, name, value, connection):
        setattr(self, name, value)
class VolumeStatus(object):
    """
    Represents an EC2 Volume status as reported by
    DescribeVolumeStatus request.

    :ivar id: The volume identifier.
    :ivar zone: The availability zone of the volume
    :ivar volume_status: A Status object that reports impaired
        functionality that arises from problems internal to the instance.
    :ivar events: A list of events relevant to the instance.
    :ivar actions: A list of events relevant to the instance.
    """

    def __init__(self, id=None, zone=None):
        self.id = id
        self.zone = zone
        self.volume_status = Status()
        # Populated lazily when the eventsSet/actionsSet elements appear.
        self.events = None
        self.actions = None

    def __repr__(self):
        return 'VolumeStatus:%s' % self.id

    def startElement(self, name, attrs, connection):
        # Hand nested containers their own parser objects; the SAX driver
        # feeds child elements to whatever object we return here.
        if name == 'eventsSet':
            self.events = EventSet()
            return self.events
        elif name == 'actionsSet':
            self.actions = ActionSet()
            return self.actions
        elif name == 'volumeStatus':
            return self.volume_status
        else:
            return None

    def endElement(self, name, value, connection):
        if name == 'volumeId':
            self.id = value
        elif name == 'availabilityZone':
            self.zone = value
        else:
            # Unknown tags are stored verbatim on the instance.
            setattr(self, name, value)
class VolumeStatusSet(list):
    """
    A list object that contains the results of a call to
    DescribeVolumeStatus request. Each element of the
    list will be an VolumeStatus object.

    :ivar next_token: If the response was truncated by
        the EC2 service, the next_token attribute of the
        object will contain the string that needs to be
        passed in to the next request to retrieve the next
        set of results.
    """

    def __init__(self, connection=None):
        super(VolumeStatusSet, self).__init__()
        self.connection = connection
        self.next_token = None

    def startElement(self, name, attrs, connection):
        # Only <item> elements open a new VolumeStatus child parser.
        if name != 'item':
            return None
        status = VolumeStatus()
        self.append(status)
        return status

    def endElement(self, name, value, connection):
        # Capture the pagination token; every tag is also stored verbatim.
        if name == 'NextToken':
            self.next_token = value
        setattr(self, name, value)
shsingh/ansible | lib/ansible/modules/system/crypttab.py | 23 | 11164 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2014, Steve <yo@groks.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: crypttab
short_description: Encrypted Linux block devices
description:
- Control Linux encrypted block devices that are set up during system boot in C(/etc/crypttab).
version_added: "1.9"
options:
name:
description:
- Name of the encrypted block device as it appears in the C(/etc/crypttab) file, or
optionally prefixed with C(/dev/mapper/), as it appears in the filesystem. I(/dev/mapper/)
will be stripped from I(name).
type: str
required: yes
state:
description:
- Use I(present) to add a line to C(/etc/crypttab) or update its definition
if already present.
- Use I(absent) to remove a line with matching I(name).
- Use I(opts_present) to add options to those already present; options with
different values will be updated.
- Use I(opts_absent) to remove options from the existing set.
type: str
required: yes
choices: [ absent, opts_absent, opts_present, present ]
backing_device:
description:
- Path to the underlying block device or file, or the UUID of a block-device
prefixed with I(UUID=).
type: str
password:
description:
- Encryption password, the path to a file containing the password, or
C(-) or unset if the password should be entered at boot.
type: path
opts:
description:
- A comma-delimited list of options. See C(crypttab(5) ) for details.
type: str
path:
description:
- Path to file to use instead of C(/etc/crypttab).
- This might be useful in a chroot environment.
type: path
default: /etc/crypttab
author:
- Steve (@groks)
'''
EXAMPLES = r'''
- name: Set the options explicitly a device which must already exist
crypttab:
name: luks-home
state: present
opts: discard,cipher=aes-cbc-essiv:sha256
- name: Add the 'discard' option to any existing options for all devices
crypttab:
name: '{{ item.device }}'
state: opts_present
opts: discard
loop: '{{ ansible_mounts }}'
when: "'/dev/mapper/luks-' in {{ item.device }}"
'''
import os
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes, to_native
def main():
    """Entry point for the crypttab Ansible module.

    Parses and validates module arguments, applies the requested change to
    the crypttab file, and reports back to Ansible via exit_json/fail_json.
    """
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True),
            state=dict(type='str', required=True, choices=['absent', 'opts_absent', 'opts_present', 'present']),
            backing_device=dict(type='str'),
            password=dict(type='path'),
            opts=dict(type='str'),
            path=dict(type='path', default='/etc/crypttab')
        ),
        supports_check_mode=True,
    )

    backing_device = module.params['backing_device']
    password = module.params['password']
    opts = module.params['opts']
    state = module.params['state']
    path = module.params['path']
    name = module.params['name']
    # Accept the /dev/mapper/<name> form and reduce it to the bare name.
    if name.startswith('/dev/mapper/'):
        name = name[len('/dev/mapper/'):]

    # Unless we are removing the line, at least one field must be supplied.
    if state != 'absent' and backing_device is None and password is None and opts is None:
        module.fail_json(msg="expected one or more of 'backing_device', 'password' or 'opts'",
                         **module.params)

    # opts_present / opts_absent only ever touch the options column.
    if 'opts' in state and (backing_device is not None or password is not None):
        module.fail_json(msg="cannot update 'backing_device' or 'password' when state=%s" % state,
                         **module.params)

    # crypttab is whitespace-delimited, so embedded whitespace would corrupt it.
    for arg_name, arg in (('name', name),
                          ('backing_device', backing_device),
                          ('password', password),
                          ('opts', opts)):
        if (arg is not None and (' ' in arg or '\t' in arg or arg == '')):
            module.fail_json(msg="invalid '%s': contains white space or is empty" % arg_name,
                             **module.params)

    try:
        crypttab = Crypttab(path)
        existing_line = crypttab.match(name)
    except Exception as e:
        module.fail_json(msg="failed to open and parse crypttab file: %s" % to_native(e),
                         exception=traceback.format_exc(), **module.params)

    # A brand-new entry cannot be written without a backing device.
    if 'present' in state and existing_line is None and backing_device is None:
        module.fail_json(msg="'backing_device' required to add a new entry",
                         **module.params)

    changed, reason = False, '?'

    if state == 'absent':
        if existing_line is not None:
            changed, reason = existing_line.remove()

    elif state == 'present':
        if existing_line is not None:
            changed, reason = existing_line.set(backing_device, password, opts)
        else:
            changed, reason = crypttab.add(Line(None, name, backing_device, password, opts))

    elif state == 'opts_present':
        if existing_line is not None:
            changed, reason = existing_line.opts.add(opts)
        else:
            changed, reason = crypttab.add(Line(None, name, backing_device, password, opts))

    elif state == 'opts_absent':
        if existing_line is not None:
            changed, reason = existing_line.opts.remove(opts)

    if changed and not module.check_mode:
        # BUG FIX: the previous try/finally referenced `f` in the finally
        # clause, raising NameError when open() itself failed.  `with`
        # guarantees the handle is closed on every path.
        with open(path, 'wb') as f:
            f.write(to_bytes(crypttab, errors='surrogate_or_strict'))

    module.exit_json(changed=changed, msg=reason, **module.params)
class Crypttab(object):
    """In-memory representation of a crypttab file.

    Parses the file at ``path`` into Line objects (creating the file and any
    missing parent directory first if necessary) and renders itself back to
    text via str().
    """

    def __init__(self, path):
        self.path = path
        # BUG FIX: ``_lines`` used to be a *class* attribute, so every
        # Crypttab instance shared — and kept appending to — the same list.
        # It must be per-instance state.
        self._lines = []
        if not os.path.exists(path):
            if not os.path.exists(os.path.dirname(path)):
                os.makedirs(os.path.dirname(path))
            open(path, 'a').close()

        # BUG FIX: the old try/finally called f.close() even when open()
        # failed, raising NameError; `with` handles both cases safely.
        with open(path, 'r') as f:
            for line in f:
                self._lines.append(Line(line))

    def add(self, line):
        """Append a Line object and report the change."""
        self._lines.append(line)
        return True, 'added line'

    def lines(self):
        """Yield only the valid entries (those with a name and device)."""
        for line in self._lines:
            if line.valid():
                yield line

    def match(self, name):
        """Return the first valid Line whose name matches, else None."""
        for line in self.lines():
            if line.name == name:
                return line
        return None

    def __str__(self):
        """Render the file, preserving comment/blank lines, newline-terminated."""
        lines = []
        for line in self._lines:
            lines.append(str(line))
        crypttab = '\n'.join(lines)
        if len(crypttab) == 0:
            crypttab += '\n'
        if crypttab[-1] != '\n':
            crypttab += '\n'
        return crypttab
class Line(object):
    """One line of a crypttab file.

    Comment and blank lines are kept verbatim; parseable entries expose
    name / backing_device / password / opts and can be edited in place.
    """

    def __init__(self, line=None, name=None, backing_device=None, password=None, opts=None):
        self.line = line
        self.name = name
        self.backing_device = backing_device
        self.password = password
        self.opts = Options(opts)
        if line is not None:
            self.line = line.rstrip('\n')
            if self._line_valid(line):
                self.name, backing_device, password, opts = self._split_line(line)
            self.set(backing_device, password, opts)

    def set(self, backing_device, password, opts):
        """Update every non-None field; report whether anything changed."""
        changed = False

        if backing_device is not None and self.backing_device != backing_device:
            self.backing_device = backing_device
            changed = True

        if password is not None and self.password != password:
            self.password = password
            changed = True

        if opts is not None:
            new_opts = Options(opts)
            if new_opts != self.opts:
                self.opts = new_opts
                changed = True

        return changed, 'updated line'

    def _line_valid(self, line):
        """A parseable entry is non-blank, not a comment, with 2-4 fields."""
        if not line.strip() or line.startswith('#'):
            return False
        return len(line.split()) in (2, 3, 4)

    def _split_line(self, line):
        """Split an entry into (name, backing_device, password, opts)."""
        fields = line.split()
        # Pad the optional password/opts columns with None.
        padded = fields + [None] * (4 - len(fields))
        return tuple(padded[:4])

    def remove(self):
        """Blank this entry so it disappears from the rendered file."""
        self.line, self.name, self.backing_device = '', None, None
        return True, 'removed line'

    def valid(self):
        """True when this line represents a usable crypttab entry."""
        return self.name is not None and self.backing_device is not None

    def __str__(self):
        """Render a valid entry from its fields; otherwise echo the raw line."""
        if not self.valid():
            return self.line
        fields = [self.name, self.backing_device]
        if self.password is not None or self.opts:
            # The password column must be present (as 'none') whenever
            # options follow it.
            fields.append(self.password if self.password is not None else 'none')
            if self.opts:
                fields.append(str(self.opts))
        return ' '.join(fields)
class Options(dict):
    """An insertion-ordered mapping of crypttab options.

    Built from a string such as 'discard,foo=bar,baz=greeble'; value-less
    options map to None.  Key order is preserved for rendering.
    """

    def __init__(self, opts_string):
        super(Options, self).__init__()
        self.itemlist = []
        if opts_string is not None:
            for token in opts_string.split(','):
                parts = token.split('=')
                # Only the first '='-separated value is kept (matches the
                # historical parsing behaviour).
                if len(parts) > 1:
                    self[parts[0]] = parts[1]
                else:
                    self[parts[0]] = None

    def add(self, opts_string):
        """Merge options from *opts_string*; report whether anything changed."""
        changed = False
        for key, value in Options(opts_string).items():
            if key not in self or self[key] != value:
                changed = True
            self[key] = value
        return changed, 'updated options'

    def remove(self, opts_string):
        """Drop the named options; report whether anything was removed."""
        changed = False
        for key in Options(opts_string):
            if key in self:
                del self[key]
                changed = True
        return changed, 'removed options'

    def keys(self):
        return self.itemlist

    def values(self):
        return [self[key] for key in self]

    def items(self):
        return [(key, self[key]) for key in self]

    def __iter__(self):
        return iter(self.itemlist)

    def __setitem__(self, key, value):
        if key not in self:
            self.itemlist.append(key)
        super(Options, self).__setitem__(key, value)

    def __delitem__(self, key):
        self.itemlist.remove(key)
        super(Options, self).__delitem__(key)

    def __ne__(self, obj):
        # Only another Options with the same key/value pairs compares equal.
        return not (isinstance(obj, Options)
                    and sorted(self.items()) == sorted(obj.items()))

    def __str__(self):
        rendered = []
        for key, value in self.items():
            rendered.append(key if value is None else '%s=%s' % (key, value))
        return ','.join(rendered)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
synthicity/urbansim | urbansim/utils/sampling.py | 4 | 7852 | import math
import numpy as np
import pandas as pd
def get_probs(data, prob_column=None):
    """
    Checks for presence of a probability column and returns the result
    as a numpy array. If the probabilities are weights (i.e. they don't
    sum to 1), then this will be recalculated.

    Parameters
    ----------
    data: pandas.DataFrame
        Table to sample from.
    prob_column: string, optional, default None
        Name of the column in the data to provide probabilities or weights.

    Returns
    -------
    numpy.array or None
        Normalized probabilities, or None when no column was requested.
    """
    if prob_column is None:
        return None

    weights = data[prob_column].fillna(0).values
    total = weights.sum()
    if total == 0:
        # All weights are zero (or NaN): fall back to a uniform distribution.
        weights = np.ones(len(weights))
        total = weights.sum()
    if abs(total - 1.0) > 1e-8:
        weights = weights / (1.0 * total)
    return weights
def accounting_sample_replace(total, data, accounting_column, prob_column=None, max_iterations=50):
    """
    Sample rows with accounting with replacement.

    Iteratively adds (or FIFO-removes) sampled rows until the accounting
    column's sum matches ``total`` or ``max_iterations`` is reached; the
    closest result seen so far is returned in the latter case.

    Parameters
    ----------
    total : int
        The control total the sampled rows will attempt to match.
    data: pandas.DataFrame
        Table to sample from.
    accounting_column: string
        Name of column with accounting totals/quantities to apply towards the control.
    prob_column: string, optional, default None
        Name of the column in the data to provide probabilities or weights.
    max_iterations: int, optional, default 50
        When using an accounting attribute, the maximum number of sampling iterations
        that will be applied.

    Returns
    -------
    sample_rows : pandas.DataFrame
        Table containing the sample.
    matched: bool
        Indicates if the total was matched exactly.
    """
    # check for probabilities (None means uniform sampling)
    p = get_probs(data, prob_column)

    # determine avg number of accounting items per sample (e.g. persons per household)
    per_sample = data[accounting_column].sum() / (1.0 * len(data.index.values))
    curr_total = 0
    remaining = total
    sample_rows = pd.DataFrame()
    closest = None
    closest_remain = total
    matched = False
    # NOTE(review): when total == 0 this returns (None, True) because the
    # loop exits before `closest` is ever assigned — confirm callers accept
    # a None sample in that case.

    for i in range(0, max_iterations):
        # stop if we've hit the control
        if remaining == 0:
            matched = True
            break

        # if sampling with probabilities, re-calc the # of items per sample
        # after the initial sample, this way the sample size reflects the probabilities
        if p is not None and i == 1:
            per_sample = sample_rows[accounting_column].sum() / (1.0 * len(sample_rows))

        # update the sample; estimate how many rows are needed to cover the gap
        num_samples = int(math.ceil(math.fabs(remaining) / per_sample))

        if remaining > 0:
            # we're short, add to the sample
            curr_ids = np.random.choice(data.index.values, num_samples, p=p)
            sample_rows = pd.concat([sample_rows, data.loc[curr_ids]])
        else:
            # we've overshot, remove from existing samples (FIFO)
            sample_rows = sample_rows.iloc[num_samples:].copy()

        # update the total and keep track of the best (closest) result so far
        curr_total = sample_rows[accounting_column].sum()
        remaining = total - curr_total

        if abs(remaining) < closest_remain:
            closest_remain = abs(remaining)
            closest = sample_rows

    return closest, matched
def accounting_sample_no_replace(total, data, accounting_column, prob_column=None):
    """
    Samples rows with accounting without replacement.

    Parameters
    ----------
    total : int
        The control total the sampled rows will attempt to match.
    data: pandas.DataFrame
        Table to sample from.
    accounting_column: string
        Name of column with accounting totals/quantities to apply towards the control.
    prob_column: string, optional, default None
        Name of the column in the data to provide probabilities or weights.

    Returns
    -------
    sample_rows : pandas.DataFrame
        Table containing the sample.
    matched: bool
        Indicates if the total was matched exactly.

    Raises
    ------
    ValueError
        If ``total`` exceeds the sum of the accounting column.
    """
    # make sure this is even feasible
    if total > data[accounting_column].sum():
        raise ValueError('Control total exceeds the available samples')

    # check for probabilities (None means uniform sampling)
    p = get_probs(data, prob_column)

    # shuffle the rows
    if p is None:
        # random shuffle
        shuff_idx = np.random.permutation(data.index.values)
    else:
        # weighted shuffle via random keys u ** (1 / w): larger weights tend
        # to produce larger keys, so sorting descending favors them
        ran_p = pd.Series(np.power(np.random.rand(len(p)), 1.0 / p), index=data.index)
        # BUG FIX: sort_values() returns a NEW Series; the previous code
        # discarded its result, so the weights were silently ignored.
        shuff_idx = ran_p.sort_values(ascending=False).index.values

    # get the initial sample: take the longest prefix whose cumulative
    # accounting sum does not exceed the control total
    shuffle = data.loc[shuff_idx]
    csum = np.cumsum(shuffle[accounting_column].values)
    pos = np.searchsorted(csum, total, 'right')
    sample = shuffle.iloc[:pos]

    # refine the sample: greedily add any remaining row that still fits
    sample_idx = sample.index.values
    sample_total = sample[accounting_column].sum()
    shortage = total - sample_total

    for idx, row in shuffle.iloc[pos:].iterrows():
        if shortage == 0:
            # we've matched
            break

        # add the current element if it doesn't exceed the total
        cnt = row[accounting_column]
        if cnt <= shortage:
            sample_idx = np.append(sample_idx, idx)
            shortage -= cnt

    # BUG FIX: `matched` was previously set only inside the loop, so an
    # exact match that consumed every row (empty remainder) reported False.
    matched = shortage == 0

    return shuffle.loc[sample_idx].copy(), matched
def sample_rows(total, data, replace=True, accounting_column=None,
                max_iterations=50, prob_column=None, return_status=False):
    """
    Samples and returns rows from a data frame while matching a desired control total. The total may
    represent a simple row count or may attempt to match a sum/quantity from an accounting column.

    Parameters
    ----------
    total : int
        The control total the sampled rows will attempt to match.
    data: pandas.DataFrame
        Table to sample from.
    replace: bool, optional, default True
        Indicates if sampling with or without replacement.
    accounting_column: string, optional
        Name of column with accounting totals/quantities to apply towards the control.
        If not provided then row counts will be used for accounting.
    max_iterations: int, optional, default 50
        When using an accounting attribute, the maximum number of sampling iterations
        that will be applied. Only applicable when sampling with replacement.
    prob_column: string, optional, default None
        If provided, name of the column in the data frame to provide probabilities
        or weights. If not provided, the sampling is random.
    return_status: bool, optional, default True
        If True, will also return a bool indicating if the total was matched exactly.

    Returns
    -------
    sample_rows : pandas.DataFrame
        Table containing the sample.
    matched: bool
        If return_status is True, returns True if total is matched exactly.
    """
    if not data.index.is_unique:
        raise ValueError('Data must have a unique index')

    # simplest case, just return n random rows
    if accounting_column is None:
        if replace is False and total > len(data.index.values):
            raise ValueError('Control total exceeds the available samples')
        # BUG FIX: get_probs expects (data, prob_column); it was previously
        # called with only the column name, which bound the name to `data`
        # and left `prob_column` as None — silently disabling weighting.
        p = get_probs(data, prob_column)
        rows = data.loc[np.random.choice(
            data.index.values, int(total), replace=replace, p=p)].copy()
        matched = True

    # sample with accounting
    else:
        if replace:
            rows, matched = accounting_sample_replace(
                total, data, accounting_column, prob_column, max_iterations)
        else:
            rows, matched = accounting_sample_no_replace(
                total, data, accounting_column, prob_column)

    # return the results
    if return_status:
        return rows, matched
    else:
        return rows
| bsd-3-clause |
bt3gl/Neat-Problems-in-Python-and-Flask | Version-Control/src/metadata.py | 1 | 4151 | #!/usr/bin/env python
__author__ = "Mari Wahl"
__email__ = "marina.w4hl@gmail.com"
__description__= "Builds the metadata for our version control."
import datetime
import sys
import os
import system_operations
from constants import BACKUP_DIR, CONTEND_DIR, OLD_VERSIONS_DIR
"""
print the HEAD file
"""
def print_log(dest):
    """Print every entry of the HEAD file found under *dest*.

    Exits the process with status 0 when no HEAD file exists yet.
    """
    header_file = dest + '/HEAD'
    # BUG FIX: this used `assert` for input validation (stripped under
    # `python -O`) wrapped in a bare `except:` that also swallowed any
    # unrelated error.  An explicit check keeps the same user-visible
    # behaviour without either hazard.
    if not os.path.isfile(header_file):
        print("HEAD is not defined yet. Try to create a snapshot first.")
        sys.exit(0)

    with open(header_file, 'r') as f:
        for line in f:
            # strip the trailing newline before printing
            print(line.split("\n")[0])
"""
print the current snapshot
"""
def print_latest_snapshot(dest):
    """Report the highest-numbered snapshot folder under *dest*.

    Exits with status 0 when the backup directory does not exist yet.
    """
    # Verify whether the destination folder exists at all.
    if not os.path.exists(dest):
        print("You do not have any snapshot yet.")
        sys.exit(0)

    snapshot_ids = (int(folder) for folder in
                    system_operations.create_list_of_folders(dest))
    print("The current snapshot is " + str(max(snapshot_ids)) + '.\n')
"""
print the difference between two snapshots
"""
def print_pretty_header(dest, ss1, ss2):
    """Print creation date and message for the two snapshots being compared.

    HEAD entries look like '<id>: <date>, <message>'.
    """
    head_path = dest + '/HEAD'
    with open(head_path, 'r') as head:
        for entry in head:
            fields = entry.split(':')
            if fields[0] in (ss1, ss2):
                date_and_msg = fields[1].split(",")
                print("Snapshot %s was created on %s" % (fields[0], date_and_msg[0]))
                print("Snapshot message: " + date_and_msg[1])
def print_pretty(diff_snapshot1, diff_snapshot2, ss1, ss2):
    """Print the files unique to each of the two snapshots."""
    print("Printing the differences between snapshots {0} and {1}:".format(ss1, ss2))
    # NOTE(review): "snaphost" is a typo in the user-facing text; preserved
    # here since this rewrite must not change runtime output.
    for label, entries in ((ss1, diff_snapshot1), (ss2, diff_snapshot2)):
        print("Diff in snaphost %s:" % label)
        for entry in entries:
            print(entry)
def print_diff(dest, ss1, ss2):
    """Show the files that differ between snapshots *ss1* and *ss2*.

    Returns the pair of sets of files unique to each snapshot.
    """
    # files present in each snapshot directory
    files_in_ss1 = set(system_operations.create_list_of_files(dest + "/" + ss1))
    files_in_ss2 = set(system_operations.create_list_of_files(dest + "/" + ss2))

    # the symmetric difference, split per side
    only_in_ss1 = files_in_ss1 - files_in_ss2
    only_in_ss2 = files_in_ss2 - files_in_ss1

    print_pretty_header(dest, ss1, ss2)
    print_pretty(only_in_ss1, only_in_ss2, ss1, ss2)
    return only_in_ss1, only_in_ss2
"""
update the header and is called in the checkout
"""
def update_header_checkout(source, argument, folder_id):
    """Rewrite HEAD during a checkout of snapshot *argument*.

    The current HEAD is archived as HEAD_<folder_id> under the old-versions
    directory, a new HEAD is rebuilt from the entries that precede the
    checked-out snapshot, and a '# checkout at <folder_id>' entry is appended.
    """
    # move the old header to the folder id
    deprecated_head = source + "/" + OLD_VERSIONS_DIR + "/HEAD_" + folder_id
    head_file = source + '/HEAD'
    # NOTE(review): `old_head` is never used afterwards — presumably
    # renaming_file is called for its side effect only; confirm.
    old_head = system_operations.renaming_file(head_file, deprecated_head)
    # copy anything before the snapshot to the new header
    new_lines = []
    with open(deprecated_head) as f:
        lines = f.readlines()
        for line in lines:
            # keep entries until we reach the snapshot being checked out
            if line.split(":")[0] != argument:
                new_lines.append(line.split("\n")[0])
            else:
                break
    # write the new HEAD file
    system_operations.creating_file(head_file, 'w')
    # NOTE(review): this `with open(...)` handle `f` is never read or
    # written — the loop appends via writing_file instead; the open looks
    # redundant. Left untouched here; verify before removing.
    with open(head_file) as f:
        for line in new_lines:
            system_operations.writing_file(head_file, 'a', line)
    update_header_snapshot(argument, source, "# checkout at " + folder_id)
"""
update the header and is called in the snapshot
"""
def update_header_snapshot(latest_snapshot_number='0', dest=BACKUP_DIR, message=""):
    """Append a '<number>: <timestamp>, <message>' entry to the HEAD file.

    The HEAD file is created first if it does not exist yet.
    """
    head_path = dest + '/HEAD'
    system_operations.creating_file(head_path)

    timestamp = datetime.datetime.now().strftime("%Y/%m/%d-%Hh%Mm")
    entry = latest_snapshot_number + ': ' + timestamp + ', ' + message
    system_operations.writing_file(head_path, 'a', entry)
# Friendly notice when this helper module is executed directly.
if __name__ == '__main__':
    print("\n--------------------------------------------------------------")
    print("This is just a module. To run the full program, please type:")
    print("$ ./b3g OPTIONS")
    print("--------------------------------------------------------------\n")
| mit |
CSC-ORG/Dynamic-Dashboard-2015 | engine/lib/python2.7/site-packages/django/conf/locale/it/formats.py | 82 | 2094 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd F Y' # 25 Ottobre 2006
TIME_FORMAT = 'H:i:s' # 14:30:59
DATETIME_FORMAT = 'l d F Y H:i:s' # Mercoledì 25 Ottobre 2006 14:30:59
YEAR_MONTH_FORMAT = 'F Y' # Ottobre 2006
MONTH_DAY_FORMAT = 'j/F' # 25/Ottobre ('j' is day-of-month, 'F' is month name)
SHORT_DATE_FORMAT = 'd/m/Y' # 25/12/2009
SHORT_DATETIME_FORMAT = 'd/m/Y H:i:s' # 25/10/2009 14:30:59
FIRST_DAY_OF_WEEK = 1 # Lunedì
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d/%m/%Y', '%Y/%m/%d', # '25/10/2006', '2008/10/25'
'%d-%m-%Y', '%Y-%m-%d', # '25-10-2006', '2008-10-25'
'%d-%m-%y', '%d/%m/%y', # '25-10-06', '25/10/06'
)
DATETIME_INPUT_FORMATS = (
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
'%d/%m/%y %H:%M:%S.%f', # '25/10/06 14:30:59.000200'
'%d/%m/%y %H:%M', # '25/10/06 14:30'
'%d/%m/%y', # '25/10/06'
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d-%m-%Y %H:%M:%S', # '25-10-2006 14:30:59'
'%d-%m-%Y %H:%M:%S.%f', # '25-10-2006 14:30:59.000200'
'%d-%m-%Y %H:%M', # '25-10-2006 14:30'
'%d-%m-%Y', # '25-10-2006'
'%d-%m-%y %H:%M:%S', # '25-10-06 14:30:59'
'%d-%m-%y %H:%M:%S.%f', # '25-10-06 14:30:59.000200'
'%d-%m-%y %H:%M', # '25-10-06 14:30'
'%d-%m-%y', # '25-10-06'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| mit |
40223135/40223135w17 | static/Brython3.1.1-20150328-091302/Lib/datetime.py | 628 | 75044 | """Concrete date/time and related types.
See http://www.iana.org/time-zones/repository/tz-link.html for
time zone and DST data sources.
"""
import time as _time
import math as _math
def _cmp(x, y):
return 0 if x == y else 1 if x > y else -1
MINYEAR = 1
MAXYEAR = 9999
_MAXORDINAL = 3652059 # date.max.toordinal()

# Utility functions, adapted from Python's Demo/classes/Dates.py, which
# also assumes the current Gregorian calendar indefinitely extended in
# both directions. Difference: Dates.py calls January 1 of year 0 day
# number 1. The code here calls January 1 of year 1 day number 1. This is
# to match the definition of the "proleptic Gregorian" calendar in Dershowitz
# and Reingold's "Calendrical Calculations", where it's the base calendar
# for all computations. See the book for algorithms for converting between
# proleptic Gregorian ordinals and many other calendar systems.

# Index 0 is a placeholder so months can be addressed 1..12 directly.
_DAYS_IN_MONTH = [None, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]

# _DAYS_BEFORE_MONTH[m] = number of days in a non-leap year before month m.
_DAYS_BEFORE_MONTH = [None]
dbm = 0
for dim in _DAYS_IN_MONTH[1:]:
    _DAYS_BEFORE_MONTH.append(dbm)
    dbm += dim
del dbm, dim
def _is_leap(year):
"year -> 1 if leap year, else 0."
return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
def _days_before_year(year):
"year -> number of days before January 1st of year."
y = year - 1
return y*365 + y//4 - y//100 + y//400
def _days_in_month(year, month):
    "year, month -> number of days in that month in that year."
    assert 1 <= month <= 12, month
    # February gains a day in leap years; every other month is fixed.
    return 29 if (month == 2 and _is_leap(year)) else _DAYS_IN_MONTH[month]
def _days_before_month(year, month):
    "year, month -> number of days in year preceding first day of month."
    assert 1 <= month <= 12, 'month must be in 1..12'
    days = _DAYS_BEFORE_MONTH[month]
    # Add the leap day once we are past February.
    if month > 2 and _is_leap(year):
        days += 1
    return days
def _ymd2ord(year, month, day):
    "year, month, day -> ordinal, considering 01-Jan-0001 as day 1."
    assert 1 <= month <= 12, 'month must be in 1..12'
    dim = _days_in_month(year, month)
    assert 1 <= day <= dim, ('day must be in 1..%d' % dim)
    # Days in all prior years, plus days in prior months of this year,
    # plus the (1-based) day of the month.
    total = _days_before_year(year)
    total += _days_before_month(year, month)
    return total + day
_DI400Y = _days_before_year(401) # number of days in 400 years
_DI100Y = _days_before_year(101) # number of days in 100 years
_DI4Y = _days_before_year(5) # number of days in 4 years

# A 4-year cycle has an extra leap day over what we'd get from pasting
# together 4 single years.
assert _DI4Y == 4 * 365 + 1

# Similarly, a 400-year cycle has an extra leap day over what we'd get from
# pasting together 4 100-year cycles.
assert _DI400Y == 4 * _DI100Y + 1

# OTOH, a 100-year cycle has one fewer leap day than we'd get from
# pasting together 25 4-year cycles.
assert _DI100Y == 25 * _DI4Y - 1
def _ord2ymd(n):
    "ordinal -> (year, month, day), considering 01-Jan-0001 as day 1."

    # n is a 1-based index, starting at 1-Jan-1. The pattern of leap years
    # repeats exactly every 400 years. The basic strategy is to find the
    # closest 400-year boundary at or before n, then work with the offset
    # from that boundary to n. Life is much clearer if we subtract 1 from
    # n first -- then the values of n at 400-year boundaries are exactly
    # those divisible by _DI400Y:
    #
    #      D   M    Y       n           n-1
    #     --  ---  ----  ----------  ----------------
    #     31  Dec  -400  -_DI400Y    -_DI400Y - 1
    #      1  Jan  -399  -_DI400Y+1  -_DI400Y         400-year boundary
    #     ...
    #     30  Dec   000  -1          -2
    #     31  Dec   000   0          -1
    #      1  Jan   001   1           0               400-year boundary
    #      2  Jan   001   2           1
    #      3  Jan   001   3           2
    #     ...
    #     31  Dec   400   _DI400Y     _DI400Y - 1
    #      1  Jan   401   _DI400Y+1   _DI400Y         400-year boundary
    n -= 1
    n400, n = divmod(n, _DI400Y)
    year = n400 * 400 + 1 # ..., -399, 1, 401, ...

    # Now n is the (non-negative) offset, in days, from January 1 of year, to
    # the desired date. Now compute how many 100-year cycles precede n.
    # Note that it's possible for n100 to equal 4! In that case 4 full
    # 100-year cycles precede the desired day, which implies the desired
    # day is December 31 at the end of a 400-year cycle.
    n100, n = divmod(n, _DI100Y)

    # Now compute how many 4-year cycles precede it.
    n4, n = divmod(n, _DI4Y)

    # And now how many single years. Again n1 can be 4, and again meaning
    # that the desired day is December 31 at the end of the 4-year cycle.
    n1, n = divmod(n, 365)

    year += n100 * 100 + n4 * 4 + n1
    if n1 == 4 or n100 == 4:
        assert n == 0
        return year-1, 12, 31

    # Now the year is correct, and n is the offset from January 1. We find
    # the month via an estimate that's either exact or one too large.
    leapyear = n1 == 3 and (n4 != 24 or n100 == 3)
    assert leapyear == _is_leap(year)
    month = (n + 50) >> 5
    preceding = _DAYS_BEFORE_MONTH[month] + (month > 2 and leapyear)
    if preceding > n: # estimate is too large
        month -= 1
        preceding -= _DAYS_IN_MONTH[month] + (month == 2 and leapyear)
    n -= preceding
    assert 0 <= n < _days_in_month(year, month)

    # Now the year and month are correct, and n is the offset from the
    # start of that month: we're done!
    return year, month, n+1
# Month and day names. For localized versions, see the calendar module.
# Index 0 is None so that month numbers (1..12) and ISO weekday numbers
# (1..7) can be used directly as indices.
_MONTHNAMES = [None, "Jan", "Feb", "Mar", "Apr", "May", "Jun",
               "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
_DAYNAMES = [None, "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
def _build_struct_time(y, m, d, hh, mm, ss, dstflag):
    """Assemble a time.struct_time from date/time components and a DST flag."""
    # struct_time wants Monday == 0; the ordinal of 1-Jan-0001 is a Monday.
    weekday = (_ymd2ord(y, m, d) + 6) % 7
    yearday = _days_before_month(y, m) + d
    return _time.struct_time((y, m, d, hh, mm, ss, weekday, yearday, dstflag))
def _format_time(hh, mm, ss, us):
# Skip trailing microseconds when us==0.
result = "%02d:%02d:%02d" % (hh, mm, ss)
if us:
result += ".%06d" % us
return result
# Correctly substitute for %z and %Z escapes in strftime formats.
def _wrap_strftime(object, format, timetuple):
    """Run time.strftime after manually expanding %f, %z and %Z.

    The platform strftime does not know about microseconds or aware
    tzinfo objects, so those escapes are substituted here first.
    """
    # Don't call utcoffset() or tzname() unless actually needed.
    freplace = None # the string to use for %f
    zreplace = None # the string to use for %z
    Zreplace = None # the string to use for %Z

    # Scan format for %z and %Z escapes, replacing as needed.
    newformat = []
    push = newformat.append
    i, n = 0, len(format)
    while i < n:
        ch = format[i]
        i += 1
        if ch == '%':
            if i < n:
                ch = format[i]
                i += 1
                if ch == 'f':
                    # Zero-padded microseconds; 0 when the object has none.
                    if freplace is None:
                        freplace = '%06d' % getattr(object,
                                                    'microsecond', 0)
                    newformat.append(freplace)
                elif ch == 'z':
                    if zreplace is None:
                        zreplace = ""
                        if hasattr(object, "utcoffset"):
                            offset = object.utcoffset()
                            if offset is not None:
                                # Render as +HHMM / -HHMM.
                                sign = '+'
                                if offset.days < 0:
                                    offset = -offset
                                    sign = '-'
                                h, m = divmod(offset, timedelta(hours=1))
                                assert not m % timedelta(minutes=1), "whole minute"
                                m //= timedelta(minutes=1)
                                zreplace = '%c%02d%02d' % (sign, h, m)
                    assert '%' not in zreplace
                    newformat.append(zreplace)
                elif ch == 'Z':
                    if Zreplace is None:
                        Zreplace = ""
                        if hasattr(object, "tzname"):
                            s = object.tzname()
                            if s is not None:
                                # strftime is going to have at this: escape %
                                Zreplace = s.replace('%', '%%')
                    newformat.append(Zreplace)
                else:
                    # Any other escape passes through untouched.
                    push('%')
                    push(ch)
            else:
                # Trailing lone '%' is preserved.
                push('%')
        else:
            push(ch)
    newformat = "".join(newformat)
    return _time.strftime(newformat, timetuple)
def _call_tzinfo_method(tzinfo, methname, tzinfoarg):
if tzinfo is None:
return None
return getattr(tzinfo, methname)(tzinfoarg)
# Just raise TypeError if the arg isn't None or a string.
def _check_tzname(name):
if name is not None and not isinstance(name, str):
raise TypeError("tzinfo.tzname() must return None or string, "
"not '%s'" % type(name))
# name is the offset-producing method, "utcoffset" or "dst".
# offset is what it returned.
# If offset isn't None or timedelta, raises TypeError.
# If offset is None, returns None.
# Else offset is checked for being in range, and a whole # of minutes.
# If it is, its integer value is returned. Else ValueError is raised.
def _check_utc_offset(name, offset):
assert name in ("utcoffset", "dst")
if offset is None:
return
if not isinstance(offset, timedelta):
raise TypeError("tzinfo.%s() must return None "
"or timedelta, not '%s'" % (name, type(offset)))
if offset % timedelta(minutes=1) or offset.microseconds:
raise ValueError("tzinfo.%s() must return a whole number "
"of minutes, got %s" % (name, offset))
if not -timedelta(1) < offset < timedelta(1):
raise ValueError("%s()=%s, must be must be strictly between"
" -timedelta(hours=24) and timedelta(hours=24)"
% (name, offset))
def _check_date_fields(year, month, day):
    """Validate (year, month, day); raise TypeError/ValueError on bad values."""
    if not isinstance(year, int):
        raise TypeError('int expected')
    if year < MINYEAR or year > MAXYEAR:
        raise ValueError('year must be in %d..%d' % (MINYEAR, MAXYEAR), year)
    if month < 1 or month > 12:
        raise ValueError('month must be in 1..12', month)
    last_day = _days_in_month(year, month)
    if day < 1 or day > last_day:
        raise ValueError('day must be in 1..%d' % last_day, day)
def _check_time_fields(hour, minute, second, microsecond):
if not isinstance(hour, int):
raise TypeError('int expected')
if not 0 <= hour <= 23:
raise ValueError('hour must be in 0..23', hour)
if not 0 <= minute <= 59:
raise ValueError('minute must be in 0..59', minute)
if not 0 <= second <= 59:
raise ValueError('second must be in 0..59', second)
if not 0 <= microsecond <= 999999:
raise ValueError('microsecond must be in 0..999999', microsecond)
def _check_tzinfo_arg(tz):
    """Raise TypeError unless *tz* is None or a tzinfo instance."""
    if tz is None or isinstance(tz, tzinfo):
        return
    raise TypeError("tzinfo argument must be None or of a tzinfo subclass")
def _cmperror(x, y):
raise TypeError("can't compare '%s' to '%s'" % (
type(x).__name__, type(y).__name__))
class timedelta:
"""Represent the difference between two datetime objects.
Supported operators:
- add, subtract timedelta
- unary plus, minus, abs
- compare to timedelta
- multiply, divide by int
In addition, datetime supports subtraction of two datetime objects
returning a timedelta, and addition or subtraction of a datetime
and a timedelta giving a datetime.
Representation: (days, seconds, microseconds). Why? Because I
felt like it.
"""
__slots__ = '_days', '_seconds', '_microseconds'
def __new__(cls, days=0, seconds=0, microseconds=0,
milliseconds=0, minutes=0, hours=0, weeks=0):
# Doing this efficiently and accurately in C is going to be difficult
# and error-prone, due to ubiquitous overflow possibilities, and that
# C double doesn't have enough bits of precision to represent
# microseconds over 10K years faithfully. The code here tries to make
# explicit where go-fast assumptions can be relied on, in order to
# guide the C implementation; it's way more convoluted than speed-
# ignoring auto-overflow-to-long idiomatic Python could be.
# XXX Check that all inputs are ints or floats.
# Final values, all integer.
# s and us fit in 32-bit signed ints; d isn't bounded.
d = s = us = 0
# Normalize everything to days, seconds, microseconds.
days += weeks*7
seconds += minutes*60 + hours*3600
microseconds += milliseconds*1000
# Get rid of all fractions, and normalize s and us.
# Take a deep breath <wink>.
if isinstance(days, float):
dayfrac, days = _math.modf(days)
daysecondsfrac, daysecondswhole = _math.modf(dayfrac * (24.*3600.))
assert daysecondswhole == int(daysecondswhole) # can't overflow
s = int(daysecondswhole)
assert days == int(days)
d = int(days)
else:
daysecondsfrac = 0.0
d = days
assert isinstance(daysecondsfrac, float)
assert abs(daysecondsfrac) <= 1.0
assert isinstance(d, int)
assert abs(s) <= 24 * 3600
# days isn't referenced again before redefinition
if isinstance(seconds, float):
secondsfrac, seconds = _math.modf(seconds)
assert seconds == int(seconds)
seconds = int(seconds)
secondsfrac += daysecondsfrac
assert abs(secondsfrac) <= 2.0
else:
secondsfrac = daysecondsfrac
# daysecondsfrac isn't referenced again
assert isinstance(secondsfrac, float)
assert abs(secondsfrac) <= 2.0
assert isinstance(seconds, int)
days, seconds = divmod(seconds, 24*3600)
d += days
s += int(seconds) # can't overflow
assert isinstance(s, int)
assert abs(s) <= 2 * 24 * 3600
# seconds isn't referenced again before redefinition
usdouble = secondsfrac * 1e6
assert abs(usdouble) < 2.1e6 # exact value not critical
# secondsfrac isn't referenced again
if isinstance(microseconds, float):
microseconds += usdouble
microseconds = round(microseconds, 0)
seconds, microseconds = divmod(microseconds, 1e6)
assert microseconds == int(microseconds)
assert seconds == int(seconds)
days, seconds = divmod(seconds, 24.*3600.)
assert days == int(days)
assert seconds == int(seconds)
d += int(days)
s += int(seconds) # can't overflow
assert isinstance(s, int)
assert abs(s) <= 3 * 24 * 3600
else:
seconds, microseconds = divmod(microseconds, 1000000)
days, seconds = divmod(seconds, 24*3600)
d += days
s += int(seconds) # can't overflow
assert isinstance(s, int)
assert abs(s) <= 3 * 24 * 3600
microseconds = float(microseconds)
microseconds += usdouble
microseconds = round(microseconds, 0)
assert abs(s) <= 3 * 24 * 3600
assert abs(microseconds) < 3.1e6
# Just a little bit of carrying possible for microseconds and seconds.
assert isinstance(microseconds, float)
assert int(microseconds) == microseconds
us = int(microseconds)
seconds, us = divmod(us, 1000000)
s += seconds # cant't overflow
assert isinstance(s, int)
days, s = divmod(s, 24*3600)
d += days
assert isinstance(d, int)
assert isinstance(s, int) and 0 <= s < 24*3600
assert isinstance(us, int) and 0 <= us < 1000000
self = object.__new__(cls)
self._days = d
self._seconds = s
self._microseconds = us
if abs(d) > 999999999:
raise OverflowError("timedelta # of days is too large: %d" % d)
return self
def __repr__(self):
if self._microseconds:
return "%s(%d, %d, %d)" % ('datetime.' + self.__class__.__name__,
self._days,
self._seconds,
self._microseconds)
if self._seconds:
return "%s(%d, %d)" % ('datetime.' + self.__class__.__name__,
self._days,
self._seconds)
return "%s(%d)" % ('datetime.' + self.__class__.__name__, self._days)
def __str__(self):
mm, ss = divmod(self._seconds, 60)
hh, mm = divmod(mm, 60)
s = "%d:%02d:%02d" % (hh, mm, ss)
if self._days:
def plural(n):
return n, abs(n) != 1 and "s" or ""
s = ("%d day%s, " % plural(self._days)) + s
if self._microseconds:
s = s + ".%06d" % self._microseconds
return s
def total_seconds(self):
"""Total seconds in the duration."""
return ((self.days * 86400 + self.seconds)*10**6 +
self.microseconds) / 10**6
    # Read-only field accessors.  State is normalized in __new__, so
    # 0 <= seconds < 24*3600 and 0 <= microseconds < 10**6; days carries
    # the sign of the whole duration.
    @property
    def days(self):
        """days -- whole-day component of the normalized duration."""
        return self._days
    @property
    def seconds(self):
        """seconds -- second component, 0 through 86399 inclusive."""
        return self._seconds
    @property
    def microseconds(self):
        """microseconds -- microsecond component, 0 through 999999 inclusive."""
        return self._microseconds
def __add__(self, other):
if isinstance(other, timedelta):
# for CPython compatibility, we cannot use
# our __class__ here, but need a real timedelta
return timedelta(self._days + other._days,
self._seconds + other._seconds,
self._microseconds + other._microseconds)
return NotImplemented
__radd__ = __add__
def __sub__(self, other):
if isinstance(other, timedelta):
# for CPython compatibility, we cannot use
# our __class__ here, but need a real timedelta
return timedelta(self._days - other._days,
self._seconds - other._seconds,
self._microseconds - other._microseconds)
return NotImplemented
def __rsub__(self, other):
if isinstance(other, timedelta):
return -self + other
return NotImplemented
def __neg__(self):
# for CPython compatibility, we cannot use
# our __class__ here, but need a real timedelta
return timedelta(-self._days,
-self._seconds,
-self._microseconds)
    def __pos__(self):
        # Unary plus is the identity on durations; safe because
        # timedelta instances are immutable.
        return self
def __abs__(self):
if self._days < 0:
return -self
else:
return self
def __mul__(self, other):
if isinstance(other, int):
# for CPython compatibility, we cannot use
# our __class__ here, but need a real timedelta
return timedelta(self._days * other,
self._seconds * other,
self._microseconds * other)
if isinstance(other, float):
a, b = other.as_integer_ratio()
return self * a / b
return NotImplemented
__rmul__ = __mul__
def _to_microseconds(self):
return ((self._days * (24*3600) + self._seconds) * 1000000 +
self._microseconds)
def __floordiv__(self, other):
if not isinstance(other, (int, timedelta)):
return NotImplemented
usec = self._to_microseconds()
if isinstance(other, timedelta):
return usec // other._to_microseconds()
if isinstance(other, int):
return timedelta(0, 0, usec // other)
def __truediv__(self, other):
if not isinstance(other, (int, float, timedelta)):
return NotImplemented
usec = self._to_microseconds()
if isinstance(other, timedelta):
return usec / other._to_microseconds()
if isinstance(other, int):
return timedelta(0, 0, usec / other)
if isinstance(other, float):
a, b = other.as_integer_ratio()
return timedelta(0, 0, b * usec / a)
def __mod__(self, other):
if isinstance(other, timedelta):
r = self._to_microseconds() % other._to_microseconds()
return timedelta(0, 0, r)
return NotImplemented
def __divmod__(self, other):
if isinstance(other, timedelta):
q, r = divmod(self._to_microseconds(),
other._to_microseconds())
return q, timedelta(0, 0, r)
return NotImplemented
# Comparisons of timedelta objects with other.
def __eq__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) == 0
else:
return False
def __ne__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) != 0
else:
return True
def __le__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) <= 0
else:
_cmperror(self, other)
def __lt__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) < 0
else:
_cmperror(self, other)
def __ge__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) >= 0
else:
_cmperror(self, other)
def __gt__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) > 0
else:
_cmperror(self, other)
    def _cmp(self, other):
        # Internal helper: compare the normalized (days, seconds,
        # microseconds) state tuples via the module-level _cmp().
        # Normalization in __new__ makes this tuple comparison total-order
        # correct.
        assert isinstance(other, timedelta)
        return _cmp(self._getstate(), other._getstate())
    def __hash__(self):
        # Hash the normalized state tuple so equal timedeltas hash equal.
        return hash(self._getstate())
def __bool__(self):
return (self._days != 0 or
self._seconds != 0 or
self._microseconds != 0)
    # Pickle support.
    def _getstate(self):
        # The pickled state is simply the normalized component triple;
        # __reduce__ feeds it back through the constructor.
        return (self._days, self._seconds, self._microseconds)
    def __reduce__(self):
        # Recreate on unpickling via timedelta(days, seconds, microseconds).
        return (self.__class__, self._getstate())
# Class attributes assigned after the class body because their values are
# themselves timedelta instances.
timedelta.min = timedelta(-999999999)  # most negative representable duration
timedelta.max = timedelta(days=999999999, hours=23, minutes=59, seconds=59,
                          microseconds=999999)  # largest representable duration
timedelta.resolution = timedelta(microseconds=1)  # smallest nonzero difference
class date:
    """Concrete date type.

    Constructors:

    __new__()
    fromtimestamp()
    today()
    fromordinal()

    Operators:

    __repr__, __str__
    __cmp__, __hash__
    __add__, __radd__, __sub__ (add/radd only with timedelta arg)

    Methods:

    timetuple()
    toordinal()
    weekday()
    isoweekday(), isocalendar(), isoformat()
    ctime()
    strftime()

    Properties (readonly):
    year, month, day
    """
    # Instances are immutable; all state lives in these three slots.
    __slots__ = '_year', '_month', '_day'

    def __new__(cls, year, month=None, day=None):
        """Constructor.

        Arguments:

        year, month, day (required, base 1)
        """
        if (isinstance(year, bytes) and len(year) == 4 and
            1 <= year[2] <= 12 and month is None):  # Month is sane
            # Pickle support: ``year`` is really the 4-byte payload
            # produced by _getstate() -- (yhi, ylo, month, day).
            self = object.__new__(cls)
            self.__setstate(year)
            return self
        _check_date_fields(year, month, day)
        self = object.__new__(cls)
        self._year = year
        self._month = month
        self._day = day
        return self

    # Additional constructors

    @classmethod
    def fromtimestamp(cls, t):
        "Construct a date from a POSIX timestamp (like time.time())."
        y, m, d, hh, mm, ss, weekday, jday, dst = _time.localtime(t)
        return cls(y, m, d)

    @classmethod
    def today(cls):
        "Construct a date from time.time()."
        t = _time.time()
        return cls.fromtimestamp(t)

    @classmethod
    def fromordinal(cls, n):
        """Construct a date from a proleptic Gregorian ordinal.

        January 1 of year 1 is day 1.  Only the year, month and day are
        non-zero in the result.
        """
        y, m, d = _ord2ymd(n)
        return cls(y, m, d)

    # Conversions to string

    def __repr__(self):
        """Convert to formal string, for repr().

        >>> dt = datetime(2010, 1, 1)
        >>> repr(dt)
        'datetime.datetime(2010, 1, 1, 0, 0)'

        >>> dt = datetime(2010, 1, 1, tzinfo=timezone.utc)
        >>> repr(dt)
        'datetime.datetime(2010, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)'
        """
        return "%s(%d, %d, %d)" % ('datetime.' + self.__class__.__name__,
                                   self._year,
                                   self._month,
                                   self._day)

    # XXX These shouldn't depend on time.localtime(), because that
    # clips the usable dates to [1970 .. 2038).  At least ctime() is
    # easily done without using strftime() -- that's better too because
    # strftime("%c", ...) is locale specific.

    def ctime(self):
        "Return ctime() style string."
        # toordinal() % 7 yields 1..6 for Mon..Sat; 0 wraps to 7 (Sunday)
        # to index the 1-based _DAYNAMES table.
        weekday = self.toordinal() % 7 or 7
        return "%s %s %2d 00:00:00 %04d" % (
            _DAYNAMES[weekday],
            _MONTHNAMES[self._month],
            self._day, self._year)

    def strftime(self, fmt):
        "Format using strftime()."
        return _wrap_strftime(self, fmt, self.timetuple())

    def __format__(self, fmt):
        # Per the format() protocol, an empty spec means str(self).
        if len(fmt) != 0:
            return self.strftime(fmt)
        return str(self)

    def isoformat(self):
        """Return the date formatted according to ISO.

        This is 'YYYY-MM-DD'.

        References:
        - http://www.w3.org/TR/NOTE-datetime
        - http://www.cl.cam.ac.uk/~mgk25/iso-time.html
        """
        return "%04d-%02d-%02d" % (self._year, self._month, self._day)

    __str__ = isoformat

    # Read-only field accessors
    @property
    def year(self):
        """year (1-9999)"""
        return self._year

    @property
    def month(self):
        """month (1-12)"""
        return self._month

    @property
    def day(self):
        """day (1-31)"""
        return self._day

    # Standard conversions, __cmp__, __hash__ (and helpers)

    def timetuple(self):
        "Return local time tuple compatible with time.localtime()."
        return _build_struct_time(self._year, self._month, self._day,
                                  0, 0, 0, -1)

    def toordinal(self):
        """Return proleptic Gregorian ordinal for the year, month and day.

        January 1 of year 1 is day 1.  Only the year, month and day values
        contribute to the result.
        """
        return _ymd2ord(self._year, self._month, self._day)

    def replace(self, year=None, month=None, day=None):
        """Return a new date with new values for the specified fields."""
        if year is None:
            year = self._year
        if month is None:
            month = self._month
        if day is None:
            day = self._day
        _check_date_fields(year, month, day)
        return date(year, month, day)

    # Comparisons of date objects with other.  NotImplemented (rather than
    # raising) lets subclasses such as datetime participate via reflection.

    def __eq__(self, other):
        if isinstance(other, date):
            return self._cmp(other) == 0
        return NotImplemented

    def __ne__(self, other):
        if isinstance(other, date):
            return self._cmp(other) != 0
        return NotImplemented

    def __le__(self, other):
        if isinstance(other, date):
            return self._cmp(other) <= 0
        return NotImplemented

    def __lt__(self, other):
        if isinstance(other, date):
            return self._cmp(other) < 0
        return NotImplemented

    def __ge__(self, other):
        if isinstance(other, date):
            return self._cmp(other) >= 0
        return NotImplemented

    def __gt__(self, other):
        if isinstance(other, date):
            return self._cmp(other) > 0
        return NotImplemented

    def _cmp(self, other):
        # Internal helper: lexicographic comparison of (year, month, day)
        # tuples via the module-level _cmp().
        assert isinstance(other, date)
        y, m, d = self._year, self._month, self._day
        y2, m2, d2 = other._year, other._month, other._day
        return _cmp((y, m, d), (y2, m2, d2))

    def __hash__(self):
        "Hash."
        # Hash the pickle state so equal dates hash equal.
        return hash(self._getstate())

    # Computations

    def __add__(self, other):
        "Add a date to a timedelta."
        if isinstance(other, timedelta):
            o = self.toordinal() + other.days
            if 0 < o <= _MAXORDINAL:
                return date.fromordinal(o)
            raise OverflowError("result out of range")
        return NotImplemented

    __radd__ = __add__

    def __sub__(self, other):
        """Subtract two dates, or a date and a timedelta."""
        if isinstance(other, timedelta):
            return self + timedelta(-other.days)
        if isinstance(other, date):
            days1 = self.toordinal()
            days2 = other.toordinal()
            return timedelta(days1 - days2)
        return NotImplemented

    def weekday(self):
        "Return day of the week, where Monday == 0 ... Sunday == 6."
        # Ordinal 1 (1-Jan-0001) was a Monday; +6 shifts so Monday -> 0.
        return (self.toordinal() + 6) % 7

    # Day-of-the-week and week-of-the-year, according to ISO

    def isoweekday(self):
        "Return day of the week, where Monday == 1 ... Sunday == 7."
        # 1-Jan-0001 is a Monday
        return self.toordinal() % 7 or 7

    def isocalendar(self):
        """Return a 3-tuple containing ISO year, week number, and weekday.

        The first ISO week of the year is the (Mon-Sun) week
        containing the year's first Thursday; everything else derives
        from that.

        The first week is 1; Monday is 1 ... Sunday is 7.

        ISO calendar algorithm taken from
        http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm
        """
        year = self._year
        week1monday = _isoweek1monday(year)
        today = _ymd2ord(self._year, self._month, self._day)
        # Internally, week and day have origin 0
        week, day = divmod(today - week1monday, 7)
        if week < 0:
            # The date precedes this year's ISO week 1, so it belongs to
            # the last week of the previous ISO year.
            year -= 1
            week1monday = _isoweek1monday(year)
            week, day = divmod(today - week1monday, 7)
        elif week >= 52:
            # Possibly already in week 1 of the next ISO year.
            if today >= _isoweek1monday(year+1):
                year += 1
                week = 0
        return year, week+1, day+1

    # Pickle support.

    def _getstate(self):
        # Pack the year into two bytes (high, low) followed by month and
        # day; note the trailing comma -- the state is a 1-tuple.
        yhi, ylo = divmod(self._year, 256)
        return bytes([yhi, ylo, self._month, self._day]),

    def __setstate(self, string):
        # Inverse of _getstate(): unpack the 4-byte payload.
        if len(string) != 4 or not (1 <= string[2] <= 12):
            raise TypeError("not enough arguments")
        yhi, ylo, self._month, self._day = string
        self._year = yhi * 256 + ylo

    def __reduce__(self):
        return (self.__class__, self._getstate())
_date_class = date  # so functions w/ args named "date" can get at the class

date.min = date(1, 1, 1)             # earliest representable date
date.max = date(9999, 12, 31)        # latest representable date
date.resolution = timedelta(days=1)  # smallest difference between dates
class tzinfo:
    """Abstract base class for time zone info classes.

    Subclasses must override the name(), utcoffset() and dst() methods.
    """
    __slots__ = ()

    def tzname(self, dt):
        "datetime -> string name of time zone."
        raise NotImplementedError("tzinfo subclass must override tzname()")

    def utcoffset(self, dt):
        "datetime -> minutes east of UTC (negative for west of UTC)"
        raise NotImplementedError("tzinfo subclass must override utcoffset()")

    def dst(self, dt):
        """datetime -> DST offset in minutes east of UTC.

        Return 0 if DST not in effect.  utcoffset() must include the DST
        offset.
        """
        raise NotImplementedError("tzinfo subclass must override dst()")

    def fromutc(self, dt):
        "datetime in UTC -> datetime in local time."

        if not isinstance(dt, datetime):
            raise TypeError("fromutc() requires a datetime argument")
        if dt.tzinfo is not self:
            raise ValueError("dt.tzinfo is not self")

        dtoff = dt.utcoffset()
        if dtoff is None:
            raise ValueError("fromutc() requires a non-None utcoffset() "
                             "result")

        # See the long comment block at the end of this file for an
        # explanation of this algorithm.
        dtdst = dt.dst()
        if dtdst is None:
            raise ValueError("fromutc() requires a non-None dst() result")
        delta = dtoff - dtdst  # the standard (non-DST) UTC offset
        if delta:
            dt += delta
            # Re-query DST at the shifted time; near a transition it can
            # differ from the value computed above.
            dtdst = dt.dst()
            if dtdst is None:
                raise ValueError("fromutc(): dt.dst gave inconsistent "
                                 "results; cannot convert")
        return dt + dtdst

    # Pickle support.

    def __reduce__(self):
        # Honor __getinitargs__/__getstate__ when the subclass defines
        # them, falling back to the instance __dict__ (or no state).
        getinitargs = getattr(self, "__getinitargs__", None)
        if getinitargs:
            args = getinitargs()
        else:
            args = ()
        getstate = getattr(self, "__getstate__", None)
        if getstate:
            state = getstate()
        else:
            state = getattr(self, "__dict__", None) or None
        if state is None:
            return (self.__class__, args)
        else:
            return (self.__class__, args, state)
_tzinfo_class = tzinfo  # so methods with a parameter named "tzinfo" can get at the class
class time:
    """Time with time zone.

    Constructors:

    __new__()

    Operators:

    __repr__, __str__
    __cmp__, __hash__

    Methods:

    strftime()
    isoformat()
    utcoffset()
    tzname()
    dst()

    Properties (readonly):
    hour, minute, second, microsecond, tzinfo
    """

    def __new__(cls, hour=0, minute=0, second=0, microsecond=0, tzinfo=None):
        """Constructor.

        Arguments:

        hour, minute (required)
        second, microsecond (default to zero)
        tzinfo (default to None)
        """
        self = object.__new__(cls)
        if isinstance(hour, bytes) and len(hour) == 6:
            # Pickle support: ``hour`` is really the 6-byte payload from
            # _getstate(), and ``minute`` then carries the tzinfo.
            self.__setstate(hour, minute or None)
            return self
        _check_tzinfo_arg(tzinfo)
        _check_time_fields(hour, minute, second, microsecond)
        self._hour = hour
        self._minute = minute
        self._second = second
        self._microsecond = microsecond
        self._tzinfo = tzinfo
        return self

    # Read-only field accessors
    @property
    def hour(self):
        """hour (0-23)"""
        return self._hour

    @property
    def minute(self):
        """minute (0-59)"""
        return self._minute

    @property
    def second(self):
        """second (0-59)"""
        return self._second

    @property
    def microsecond(self):
        """microsecond (0-999999)"""
        return self._microsecond

    @property
    def tzinfo(self):
        """timezone info object"""
        return self._tzinfo

    # Standard conversions, __hash__ (and helpers)

    # Comparisons of time objects with other.

    def __eq__(self, other):
        if isinstance(other, time):
            return self._cmp(other, allow_mixed=True) == 0
        else:
            return False

    def __ne__(self, other):
        if isinstance(other, time):
            return self._cmp(other, allow_mixed=True) != 0
        else:
            return True

    def __le__(self, other):
        if isinstance(other, time):
            return self._cmp(other) <= 0
        else:
            _cmperror(self, other)

    def __lt__(self, other):
        if isinstance(other, time):
            return self._cmp(other) < 0
        else:
            _cmperror(self, other)

    def __ge__(self, other):
        if isinstance(other, time):
            return self._cmp(other) >= 0
        else:
            _cmperror(self, other)

    def __gt__(self, other):
        if isinstance(other, time):
            return self._cmp(other) > 0
        else:
            _cmperror(self, other)

    def _cmp(self, other, allow_mixed=False):
        # Internal helper shared by all rich comparisons.  allow_mixed=True
        # (used by ==/!=) makes a naive-vs-aware comparison yield "unequal"
        # instead of raising.
        assert isinstance(other, time)
        mytz = self._tzinfo
        ottz = other._tzinfo
        myoff = otoff = None

        if mytz is ottz:
            # Identical tzinfo objects: compare wall-clock fields directly.
            base_compare = True
        else:
            myoff = self.utcoffset()
            otoff = other.utcoffset()
            base_compare = myoff == otoff

        if base_compare:
            return _cmp((self._hour, self._minute, self._second,
                         self._microsecond),
                        (other._hour, other._minute, other._second,
                         other._microsecond))
        if myoff is None or otoff is None:
            if allow_mixed:
                return 2  # arbitrary non-zero value
            else:
                raise TypeError("cannot compare naive and aware times")
        # Different non-None offsets: compare in UTC by subtracting each
        # side's offset (in whole minutes) from its wall-clock minutes.
        myhhmm = self._hour * 60 + self._minute - myoff//timedelta(minutes=1)
        othhmm = other._hour * 60 + other._minute - otoff//timedelta(minutes=1)
        return _cmp((myhhmm, self._second, self._microsecond),
                    (othhmm, other._second, other._microsecond))

    def __hash__(self):
        """Hash."""
        # Normalize to UTC first so equal aware times hash equal.
        tzoff = self.utcoffset()
        if not tzoff:  # zero or None
            return hash(self._getstate()[0])
        h, m = divmod(timedelta(hours=self.hour, minutes=self.minute) - tzoff,
                      timedelta(hours=1))
        assert not m % timedelta(minutes=1), "whole minute"
        m //= timedelta(minutes=1)
        if 0 <= h < 24:
            # The UTC equivalent is a representable time; hash that.
            return hash(time(h, m, self.second, self.microsecond))
        # Offset pushed the hour out of range; hash the raw tuple instead.
        return hash((h, m, self.second, self.microsecond))

    # Conversion to string

    def _tzstr(self, sep=":"):
        """Return formatted timezone offset (+xx:xx) or None."""
        off = self.utcoffset()
        if off is not None:
            if off.days < 0:
                sign = "-"
                off = -off
            else:
                sign = "+"
            hh, mm = divmod(off, timedelta(hours=1))
            assert not mm % timedelta(minutes=1), "whole minute"
            mm //= timedelta(minutes=1)
            assert 0 <= hh < 24
            off = "%s%02d%s%02d" % (sign, hh, sep, mm)
        return off

    def __repr__(self):
        """Convert to formal string, for repr()."""
        # Trailing zero fields (microsecond, then second) are omitted.
        if self._microsecond != 0:
            s = ", %d, %d" % (self._second, self._microsecond)
        elif self._second != 0:
            s = ", %d" % self._second
        else:
            s = ""
        s = "%s(%d, %d%s)" % ('datetime.' + self.__class__.__name__,
                              self._hour, self._minute, s)
        if self._tzinfo is not None:
            assert s[-1:] == ")"
            s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")"
        return s

    def isoformat(self):
        """Return the time formatted according to ISO.

        This is 'HH:MM:SS.mmmmmm+zz:zz', or 'HH:MM:SS+zz:zz' if
        self.microsecond == 0.
        """
        s = _format_time(self._hour, self._minute, self._second,
                         self._microsecond)
        tz = self._tzstr()
        if tz:
            s += tz
        return s

    __str__ = isoformat

    def strftime(self, fmt):
        """Format using strftime().  The date part of the timestamp passed
        to underlying strftime should not be used.
        """
        # The year must be >= 1000 else Python's strftime implementation
        # can raise a bogus exception.
        timetuple = (1900, 1, 1,
                     self._hour, self._minute, self._second,
                     0, 1, -1)
        return _wrap_strftime(self, fmt, timetuple)

    def __format__(self, fmt):
        # Per the format() protocol, an empty spec means str(self).
        if len(fmt) != 0:
            return self.strftime(fmt)
        return str(self)

    # Timezone functions

    def utcoffset(self):
        """Return the timezone offset in minutes east of UTC (negative west of
        UTC)."""
        if self._tzinfo is None:
            return None
        offset = self._tzinfo.utcoffset(None)
        _check_utc_offset("utcoffset", offset)
        return offset

    def tzname(self):
        """Return the timezone name.

        Note that the name is 100% informational -- there's no requirement that
        it mean anything in particular.  For example, "GMT", "UTC", "-500",
        "-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies.
        """
        if self._tzinfo is None:
            return None
        name = self._tzinfo.tzname(None)
        _check_tzname(name)
        return name

    def dst(self):
        """Return 0 if DST is not in effect, or the DST offset (in minutes
        eastward) if DST is in effect.

        This is purely informational; the DST offset has already been added to
        the UTC offset returned by utcoffset() if applicable, so there's no
        need to consult dst() unless you're interested in displaying the DST
        info.
        """
        if self._tzinfo is None:
            return None
        offset = self._tzinfo.dst(None)
        _check_utc_offset("dst", offset)
        return offset

    def replace(self, hour=None, minute=None, second=None, microsecond=None,
                tzinfo=True):
        """Return a new time with new values for the specified fields."""
        # tzinfo defaults to True (not None) as a sentinel, because None is
        # a meaningful value meaning "make the result naive".
        if hour is None:
            hour = self.hour
        if minute is None:
            minute = self.minute
        if second is None:
            second = self.second
        if microsecond is None:
            microsecond = self.microsecond
        if tzinfo is True:
            tzinfo = self.tzinfo
        _check_time_fields(hour, minute, second, microsecond)
        _check_tzinfo_arg(tzinfo)
        return time(hour, minute, second, microsecond, tzinfo)

    def __bool__(self):
        # A time is false only when its UTC-adjusted value is midnight.
        if self.second or self.microsecond:
            return True
        offset = self.utcoffset() or timedelta(0)
        return timedelta(hours=self.hour, minutes=self.minute) != offset

    # Pickle support.

    def _getstate(self):
        # Pack hour, minute, second and the 3-byte microsecond count into a
        # 6-byte string; tzinfo (if any) rides along as a second item.
        us2, us3 = divmod(self._microsecond, 256)
        us1, us2 = divmod(us2, 256)
        basestate = bytes([self._hour, self._minute, self._second,
                           us1, us2, us3])
        if self._tzinfo is None:
            return (basestate,)
        else:
            return (basestate, self._tzinfo)

    def __setstate(self, string, tzinfo):
        # Inverse of _getstate(): unpack the 6-byte payload and validate
        # the tzinfo argument.
        if len(string) != 6 or string[0] >= 24:
            raise TypeError("an integer is required")
        (self._hour, self._minute, self._second,
         us1, us2, us3) = string
        self._microsecond = (((us1 << 8) | us2) << 8) | us3
        if tzinfo is None or isinstance(tzinfo, _tzinfo_class):
            self._tzinfo = tzinfo
        else:
            raise TypeError("bad tzinfo state arg %r" % tzinfo)

    def __reduce__(self):
        return (time, self._getstate())
_time_class = time  # so functions w/ args named "time" can get at the class

time.min = time(0, 0, 0)                          # earliest representable time
time.max = time(23, 59, 59, 999999)               # latest representable time
time.resolution = timedelta(microseconds=1)       # smallest difference between times
class datetime(date):
"""datetime(year, month, day[, hour[, minute[, second[, microsecond[,tzinfo]]]]])
The year, month and day arguments are required. tzinfo may be None, or an
instance of a tzinfo subclass. The remaining arguments may be ints.
"""
__slots__ = date.__slots__ + (
'_hour', '_minute', '_second',
'_microsecond', '_tzinfo')
def __new__(cls, year, month=None, day=None, hour=0, minute=0, second=0,
microsecond=0, tzinfo=None):
if isinstance(year, bytes) and len(year) == 10:
# Pickle support
self = date.__new__(cls, year[:4])
self.__setstate(year, month)
return self
_check_tzinfo_arg(tzinfo)
_check_time_fields(hour, minute, second, microsecond)
self = date.__new__(cls, year, month, day)
self._hour = hour
self._minute = minute
self._second = second
self._microsecond = microsecond
self._tzinfo = tzinfo
return self
# Read-only field accessors
@property
def hour(self):
"""hour (0-23)"""
return self._hour
@property
def minute(self):
"""minute (0-59)"""
return self._minute
@property
def second(self):
"""second (0-59)"""
return self._second
@property
def microsecond(self):
"""microsecond (0-999999)"""
return self._microsecond
@property
def tzinfo(self):
"""timezone info object"""
return self._tzinfo
@classmethod
def fromtimestamp(cls, t, tz=None):
"""Construct a datetime from a POSIX timestamp (like time.time()).
A timezone info object may be passed in as well.
"""
_check_tzinfo_arg(tz)
converter = _time.localtime if tz is None else _time.gmtime
t, frac = divmod(t, 1.0)
us = int(frac * 1e6)
# If timestamp is less than one microsecond smaller than a
# full second, us can be rounded up to 1000000. In this case,
# roll over to seconds, otherwise, ValueError is raised
# by the constructor.
if us == 1000000:
t += 1
us = 0
y, m, d, hh, mm, ss, weekday, jday, dst = converter(t)
ss = min(ss, 59) # clamp out leap seconds if the platform has them
result = cls(y, m, d, hh, mm, ss, us, tz)
if tz is not None:
result = tz.fromutc(result)
return result
@classmethod
def utcfromtimestamp(cls, t):
"Construct a UTC datetime from a POSIX timestamp (like time.time())."
t, frac = divmod(t, 1.0)
us = int(frac * 1e6)
# If timestamp is less than one microsecond smaller than a
# full second, us can be rounded up to 1000000. In this case,
# roll over to seconds, otherwise, ValueError is raised
# by the constructor.
if us == 1000000:
t += 1
us = 0
y, m, d, hh, mm, ss, weekday, jday, dst = _time.gmtime(t)
ss = min(ss, 59) # clamp out leap seconds if the platform has them
return cls(y, m, d, hh, mm, ss, us)
# XXX This is supposed to do better than we *can* do by using time.time(),
# XXX if the platform supports a more accurate way. The C implementation
# XXX uses gettimeofday on platforms that have it, but that isn't
# XXX available from Python. So now() may return different results
# XXX across the implementations.
@classmethod
def now(cls, tz=None):
"Construct a datetime from time.time() and optional time zone info."
t = _time.time()
return cls.fromtimestamp(t, tz)
@classmethod
def utcnow(cls):
"Construct a UTC datetime from time.time()."
t = _time.time()
return cls.utcfromtimestamp(t)
@classmethod
def combine(cls, date, time):
"Construct a datetime from a given date and a given time."
if not isinstance(date, _date_class):
raise TypeError("date argument must be a date instance")
if not isinstance(time, _time_class):
raise TypeError("time argument must be a time instance")
return cls(date.year, date.month, date.day,
time.hour, time.minute, time.second, time.microsecond,
time.tzinfo)
def timetuple(self):
"Return local time tuple compatible with time.localtime()."
dst = self.dst()
if dst is None:
dst = -1
elif dst:
dst = 1
else:
dst = 0
return _build_struct_time(self.year, self.month, self.day,
self.hour, self.minute, self.second,
dst)
def timestamp(self):
"Return POSIX timestamp as float"
if self._tzinfo is None:
return _time.mktime((self.year, self.month, self.day,
self.hour, self.minute, self.second,
-1, -1, -1)) + self.microsecond / 1e6
else:
return (self - _EPOCH).total_seconds()
def utctimetuple(self):
"Return UTC time tuple compatible with time.gmtime()."
offset = self.utcoffset()
if offset:
self -= offset
y, m, d = self.year, self.month, self.day
hh, mm, ss = self.hour, self.minute, self.second
return _build_struct_time(y, m, d, hh, mm, ss, 0)
def date(self):
"Return the date part."
return date(self._year, self._month, self._day)
def time(self):
"Return the time part, with tzinfo None."
return time(self.hour, self.minute, self.second, self.microsecond)
def timetz(self):
"Return the time part, with same tzinfo."
return time(self.hour, self.minute, self.second, self.microsecond,
self._tzinfo)
def replace(self, year=None, month=None, day=None, hour=None,
minute=None, second=None, microsecond=None, tzinfo=True):
"""Return a new datetime with new values for the specified fields."""
if year is None:
year = self.year
if month is None:
month = self.month
if day is None:
day = self.day
if hour is None:
hour = self.hour
if minute is None:
minute = self.minute
if second is None:
second = self.second
if microsecond is None:
microsecond = self.microsecond
if tzinfo is True:
tzinfo = self.tzinfo
_check_date_fields(year, month, day)
_check_time_fields(hour, minute, second, microsecond)
_check_tzinfo_arg(tzinfo)
return datetime(year, month, day, hour, minute, second,
microsecond, tzinfo)
def astimezone(self, tz=None):
if tz is None:
if self.tzinfo is None:
raise ValueError("astimezone() requires an aware datetime")
ts = (self - _EPOCH) // timedelta(seconds=1)
localtm = _time.localtime(ts)
local = datetime(*localtm[:6])
try:
# Extract TZ data if available
gmtoff = localtm.tm_gmtoff
zone = localtm.tm_zone
except AttributeError:
# Compute UTC offset and compare with the value implied
# by tm_isdst. If the values match, use the zone name
# implied by tm_isdst.
delta = local - datetime(*_time.gmtime(ts)[:6])
dst = _time.daylight and localtm.tm_isdst > 0
gmtoff = -(_time.altzone if dst else _time.timezone)
if delta == timedelta(seconds=gmtoff):
tz = timezone(delta, _time.tzname[dst])
else:
tz = timezone(delta)
else:
tz = timezone(timedelta(seconds=gmtoff), zone)
elif not isinstance(tz, tzinfo):
raise TypeError("tz argument must be an instance of tzinfo")
mytz = self.tzinfo
if mytz is None:
raise ValueError("astimezone() requires an aware datetime")
if tz is mytz:
return self
# Convert self to UTC, and attach the new time zone object.
myoffset = self.utcoffset()
if myoffset is None:
raise ValueError("astimezone() requires an aware datetime")
utc = (self - myoffset).replace(tzinfo=tz)
# Convert from UTC to tz's local time.
return tz.fromutc(utc)
# Ways to produce a string.
def ctime(self):
"Return ctime() style string."
weekday = self.toordinal() % 7 or 7
return "%s %s %2d %02d:%02d:%02d %04d" % (
_DAYNAMES[weekday],
_MONTHNAMES[self._month],
self._day,
self._hour, self._minute, self._second,
self._year)
def isoformat(self, sep='T'):
"""Return the time formatted according to ISO.
This is 'YYYY-MM-DD HH:MM:SS.mmmmmm', or 'YYYY-MM-DD HH:MM:SS' if
self.microsecond == 0.
If self.tzinfo is not None, the UTC offset is also attached, giving
'YYYY-MM-DD HH:MM:SS.mmmmmm+HH:MM' or 'YYYY-MM-DD HH:MM:SS+HH:MM'.
Optional argument sep specifies the separator between date and
time, default 'T'.
"""
s = ("%04d-%02d-%02d%c" % (self._year, self._month, self._day,
sep) +
_format_time(self._hour, self._minute, self._second,
self._microsecond))
off = self.utcoffset()
if off is not None:
if off.days < 0:
sign = "-"
off = -off
else:
sign = "+"
hh, mm = divmod(off, timedelta(hours=1))
assert not mm % timedelta(minutes=1), "whole minute"
mm //= timedelta(minutes=1)
s += "%s%02d:%02d" % (sign, hh, mm)
return s
def __repr__(self):
"""Convert to formal string, for repr()."""
L = [self._year, self._month, self._day, # These are never zero
self._hour, self._minute, self._second, self._microsecond]
if L[-1] == 0:
del L[-1]
if L[-1] == 0:
del L[-1]
s = ", ".join(map(str, L))
s = "%s(%s)" % ('datetime.' + self.__class__.__name__, s)
if self._tzinfo is not None:
assert s[-1:] == ")"
s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")"
return s
def __str__(self):
"Convert to string, for str()."
return self.isoformat(sep=' ')
@classmethod
def strptime(cls, date_string, format):
'string, format -> new datetime parsed from a string (like time.strptime()).'
import _strptime
return _strptime._strptime_datetime(cls, date_string, format)
def utcoffset(self):
"""Return the timezone offset in minutes east of UTC (negative west of
UTC)."""
if self._tzinfo is None:
return None
offset = self._tzinfo.utcoffset(self)
_check_utc_offset("utcoffset", offset)
return offset
def tzname(self):
"""Return the timezone name.
Note that the name is 100% informational -- there's no requirement that
it mean anything in particular. For example, "GMT", "UTC", "-500",
"-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies.
"""
name = _call_tzinfo_method(self._tzinfo, "tzname", self)
_check_tzname(name)
return name
def dst(self):
"""Return 0 if DST is not in effect, or the DST offset (in minutes
eastward) if DST is in effect.
This is purely informational; the DST offset has already been added to
the UTC offset returned by utcoffset() if applicable, so there's no
need to consult dst() unless you're interested in displaying the DST
info.
"""
if self._tzinfo is None:
return None
offset = self._tzinfo.dst(self)
_check_utc_offset("dst", offset)
return offset
# Comparisons of datetime objects with other.
def __eq__(self, other):
if isinstance(other, datetime):
return self._cmp(other, allow_mixed=True) == 0
elif not isinstance(other, date):
return NotImplemented
else:
return False
def __ne__(self, other):
if isinstance(other, datetime):
return self._cmp(other, allow_mixed=True) != 0
elif not isinstance(other, date):
return NotImplemented
else:
return True
def __le__(self, other):
if isinstance(other, datetime):
return self._cmp(other) <= 0
elif not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def __lt__(self, other):
if isinstance(other, datetime):
return self._cmp(other) < 0
elif not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def __ge__(self, other):
if isinstance(other, datetime):
return self._cmp(other) >= 0
elif not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def __gt__(self, other):
if isinstance(other, datetime):
return self._cmp(other) > 0
elif not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def _cmp(self, other, allow_mixed=False):
assert isinstance(other, datetime)
mytz = self._tzinfo
ottz = other._tzinfo
myoff = otoff = None
if mytz is ottz:
base_compare = True
else:
myoff = self.utcoffset()
otoff = other.utcoffset()
base_compare = myoff == otoff
if base_compare:
return _cmp((self._year, self._month, self._day,
self._hour, self._minute, self._second,
self._microsecond),
(other._year, other._month, other._day,
other._hour, other._minute, other._second,
other._microsecond))
if myoff is None or otoff is None:
if allow_mixed:
return 2 # arbitrary non-zero value
else:
raise TypeError("cannot compare naive and aware datetimes")
# XXX What follows could be done more efficiently...
diff = self - other # this will take offsets into account
if diff.days < 0:
return -1
return diff and 1 or 0
    def __add__(self, other):
        "Add a datetime and a timedelta."
        if not isinstance(other, timedelta):
            return NotImplemented
        # Re-express self as a timedelta from ordinal day 0, so the sum can
        # be computed with timedelta arithmetic (which normalizes overflow).
        delta = timedelta(self.toordinal(),
                          hours=self._hour,
                          minutes=self._minute,
                          seconds=self._second,
                          microseconds=self._microsecond)
        delta += other
        hour, rem = divmod(delta.seconds, 3600)
        minute, second = divmod(rem, 60)
        if 0 < delta.days <= _MAXORDINAL:
            # The tzinfo member is carried over unchanged: arithmetic is
            # performed on the naive wall-clock time.
            return datetime.combine(date.fromordinal(delta.days),
                                    time(hour, minute, second,
                                         delta.microseconds,
                                         tzinfo=self._tzinfo))
        raise OverflowError("result out of range")
    # timedelta + datetime reuses the same logic (addition is commutative).
    __radd__ = __add__
    def __sub__(self, other):
        "Subtract two datetimes, or a datetime and a timedelta."
        if not isinstance(other, datetime):
            if isinstance(other, timedelta):
                # datetime - timedelta delegates to __add__ with the negation.
                return self + -other
            return NotImplemented
        # datetime - datetime: first compute the naive wall-clock difference.
        days1 = self.toordinal()
        days2 = other.toordinal()
        secs1 = self._second + self._minute * 60 + self._hour * 3600
        secs2 = other._second + other._minute * 60 + other._hour * 3600
        base = timedelta(days1 - days2,
                         secs1 - secs2,
                         self._microsecond - other._microsecond)
        if self._tzinfo is other._tzinfo:
            # Same tzinfo object: equal offsets, naive difference is exact.
            return base
        myoff = self.utcoffset()
        otoff = other.utcoffset()
        if myoff == otoff:
            return base
        if myoff is None or otoff is None:
            raise TypeError("cannot mix naive and timezone-aware time")
        # Correct by the offset difference so the result is true elapsed time.
        return base + otoff - myoff
    def __hash__(self):
        # Aware datetimes hash on their UTC equivalent so that values that
        # compare equal across different zones also hash equal; naive ones
        # hash on the packed field state.
        tzoff = self.utcoffset()
        if tzoff is None:
            return hash(self._getstate()[0])
        days = _ymd2ord(self.year, self.month, self.day)
        seconds = self.hour * 3600 + self.minute * 60 + self.second
        return hash(timedelta(days, seconds, self.microsecond) - tzoff)
# Pickle support.
    def _getstate(self):
        """Return the pickle payload: a 10-byte packed encoding of the
        date/time fields, plus the tzinfo object when one is attached."""
        yhi, ylo = divmod(self._year, 256)
        # Microseconds (< 2**24) are split across three bytes.
        us2, us3 = divmod(self._microsecond, 256)
        us1, us2 = divmod(us2, 256)
        basestate = bytes([yhi, ylo, self._month, self._day,
                           self._hour, self._minute, self._second,
                           us1, us2, us3])
        if self._tzinfo is None:
            return (basestate,)
        else:
            return (basestate, self._tzinfo)
    def __setstate(self, string, tzinfo):
        # Inverse of _getstate(): unpack the 10-byte encoding produced there.
        (yhi, ylo, self._month, self._day, self._hour,
         self._minute, self._second, us1, us2, us3) = string
        self._year = yhi * 256 + ylo
        # Reassemble the three microsecond bytes (big-endian).
        self._microsecond = (((us1 << 8) | us2) << 8) | us3
        if tzinfo is None or isinstance(tzinfo, _tzinfo_class):
            self._tzinfo = tzinfo
        else:
            raise TypeError("bad tzinfo state arg %r" % tzinfo)
    def __reduce__(self):
        # Pickle as (constructor, packed-state) -- the constructor recognizes
        # the byte-string form produced by _getstate().
        return (self.__class__, self._getstate())
# Extreme representable datetimes and the smallest distinguishable difference.
datetime.min = datetime(1, 1, 1)
datetime.max = datetime(9999, 12, 31, 23, 59, 59, 999999)
datetime.resolution = timedelta(microseconds=1)
def _isoweek1monday(year):
    """Return the day ordinal of the Monday starting ISO week 1 of *year*.

    ISO 8601 defines week 1 as the week containing the year's first
    Thursday, so week 1's Monday can fall in the previous calendar year.
    """
    # XXX This could be done more efficiently
    THURSDAY = 3
    jan1 = _ymd2ord(year, 1, 1)
    jan1_weekday = (jan1 + 6) % 7  # 0 == Monday (see weekday() above)
    week1monday = jan1 - jan1_weekday
    if jan1_weekday > THURSDAY:
        # Jan 1 lies after Thursday, so it belongs to the previous ISO year.
        week1monday += 7
    return week1monday
class timezone(tzinfo):
    """Concrete tzinfo with a fixed UTC offset (whole minutes, no DST)."""

    __slots__ = '_offset', '_name'

    # Sentinel value to disallow None
    _Omitted = object()

    def __new__(cls, offset, name=_Omitted):
        # Validating constructor: offset must be a timedelta representing a
        # whole number of minutes, strictly inside +/- 24 hours.
        if not isinstance(offset, timedelta):
            raise TypeError("offset must be a timedelta")
        if name is cls._Omitted:
            if not offset:
                # Zero offset with no explicit name: reuse the UTC singleton.
                return cls.utc
            name = None
        elif not isinstance(name, str):
            raise TypeError("name must be a string")
        if not cls._minoffset <= offset <= cls._maxoffset:
            raise ValueError("offset must be a timedelta"
                             " strictly between -timedelta(hours=24) and"
                             " timedelta(hours=24).")
        if (offset.microseconds != 0 or
            offset.seconds % 60 != 0):
            raise ValueError("offset must be a timedelta"
                             " representing a whole number of minutes")
        return cls._create(offset, name)

    @classmethod
    def _create(cls, offset, name=None):
        # Internal non-validating constructor, used to build the class-level
        # singletons (utc/min/max) before validation is possible.
        self = tzinfo.__new__(cls)
        self._offset = offset
        self._name = name
        return self

    def __getinitargs__(self):
        """pickle support"""
        if self._name is None:
            return (self._offset,)
        return (self._offset, self._name)

    def __eq__(self, other):
        # Equality is by offset only; the display name does not participate.
        if type(other) != timezone:
            return False
        return self._offset == other._offset

    def __hash__(self):
        return hash(self._offset)

    def __repr__(self):
        """Convert to formal string, for repr().

        >>> tz = timezone.utc
        >>> repr(tz)
        'datetime.timezone.utc'
        >>> tz = timezone(timedelta(hours=-5), 'EST')
        >>> repr(tz)
        "datetime.timezone(datetime.timedelta(-1, 68400), 'EST')"
        """
        if self is self.utc:
            return 'datetime.timezone.utc'
        if self._name is None:
            return "%s(%r)" % ('datetime.' + self.__class__.__name__,
                               self._offset)
        return "%s(%r, %r)" % ('datetime.' + self.__class__.__name__,
                               self._offset, self._name)

    def __str__(self):
        return self.tzname(None)

    def utcoffset(self, dt):
        # The offset is fixed, so dt is only type-checked, never inspected.
        if isinstance(dt, datetime) or dt is None:
            return self._offset
        raise TypeError("utcoffset() argument must be a datetime instance"
                        " or None")

    def tzname(self, dt):
        if isinstance(dt, datetime) or dt is None:
            if self._name is None:
                # Unnamed zones derive a display name such as 'UTC+05:30'.
                return self._name_from_offset(self._offset)
            return self._name
        raise TypeError("tzname() argument must be a datetime instance"
                        " or None")

    def dst(self, dt):
        # Fixed-offset zones never observe daylight saving time.
        if isinstance(dt, datetime) or dt is None:
            return None
        raise TypeError("dst() argument must be a datetime instance"
                        " or None")

    def fromutc(self, dt):
        # Converting from UTC is a plain shift by the fixed offset.
        if isinstance(dt, datetime):
            if dt.tzinfo is not self:
                raise ValueError("fromutc: dt.tzinfo "
                                 "is not self")
            return dt + self._offset
        raise TypeError("fromutc() argument must be a datetime instance"
                        " or None")

    # Largest/smallest offsets accepted by the validating constructor.
    _maxoffset = timedelta(hours=23, minutes=59)
    _minoffset = -_maxoffset

    @staticmethod
    def _name_from_offset(delta):
        # Format a timedelta offset as 'UTC(+|-)HH:MM'.
        if delta < timedelta(0):
            sign = '-'
            delta = -delta
        else:
            sign = '+'
        hours, rest = divmod(delta, timedelta(hours=1))
        minutes = rest // timedelta(minutes=1)
        return 'UTC{}{:02d}:{:02d}'.format(sign, hours, minutes)
# Class-level singletons (built via _create, bypassing __new__ validation)
# and the POSIX epoch expressed as an aware UTC datetime.
timezone.utc = timezone._create(timedelta(0))
timezone.min = timezone._create(timezone._minoffset)
timezone.max = timezone._create(timezone._maxoffset)
_EPOCH = datetime(1970, 1, 1, tzinfo=timezone.utc)
"""
Some time zone algebra. For a datetime x, let
x.n = x stripped of its timezone -- its naive time.
x.o = x.utcoffset(), and assuming that doesn't raise an exception or
return None
x.d = x.dst(), and assuming that doesn't raise an exception or
return None
x.s = x's standard offset, x.o - x.d
Now some derived rules, where k is a duration (timedelta).
1. x.o = x.s + x.d
This follows from the definition of x.s.
2. If x and y have the same tzinfo member, x.s = y.s.
This is actually a requirement, an assumption we need to make about
sane tzinfo classes.
3. The naive UTC time corresponding to x is x.n - x.o.
This is again a requirement for a sane tzinfo class.
4. (x+k).s = x.s
This follows from #2, and the fact that adding a timedelta to a datetime preserves its tzinfo.
5. (x+k).n = x.n + k
Again follows from how arithmetic is defined.
Now we can explain tz.fromutc(x). Let's assume it's an interesting case
(meaning that the various tzinfo methods exist, and don't blow up or return
None when called).
The function wants to return a datetime y with timezone tz, equivalent to x.
x is already in UTC.
By #3, we want
y.n - y.o = x.n [1]
The algorithm starts by attaching tz to x.n, and calling that y. So
x.n = y.n at the start. Then it wants to add a duration k to y, so that [1]
becomes true; in effect, we want to solve [2] for k:
(y+k).n - (y+k).o = x.n [2]
By #1, this is the same as
(y+k).n - ((y+k).s + (y+k).d) = x.n [3]
By #5, (y+k).n = y.n + k, which equals x.n + k because x.n=y.n at the start.
Substituting that into [3],
x.n + k - (y+k).s - (y+k).d = x.n; the x.n terms cancel, leaving
k - (y+k).s - (y+k).d = 0; rearranging,
k = (y+k).s - (y+k).d; by #4, (y+k).s == y.s, so
k = y.s - (y+k).d
On the RHS, (y+k).d can't be computed directly, but y.s can be, and we
approximate k by ignoring the (y+k).d term at first. Note that k can't be
very large, since all offset-returning methods return a duration of magnitude
less than 24 hours. For that reason, if y is firmly in std time, (y+k).d must
be 0, so ignoring it has no consequence then.
In any case, the new value is
z = y + y.s [4]
It's helpful to step back and look at [4] from a higher level: it's simply
mapping from UTC to tz's standard time.
At this point, if
z.n - z.o = x.n [5]
we have an equivalent time, and are almost done. The insecurity here is
at the start of daylight time. Picture US Eastern for concreteness. The wall
time jumps from 1:59 to 3:00, and wall hours of the form 2:MM don't make good
sense then. The docs ask that an Eastern tzinfo class consider such a time to
be EDT (because it's "after 2"), which is a redundant spelling of 1:MM EST
on the day DST starts. We want to return the 1:MM EST spelling because that's
the only spelling that makes sense on the local wall clock.
In fact, if [5] holds at this point, we do have the standard-time spelling,
but that takes a bit of proof. We first prove a stronger result. What's the
difference between the LHS and RHS of [5]? Let
diff = x.n - (z.n - z.o) [6]
Now
z.n = by [4]
(y + y.s).n = by #5
y.n + y.s = since y.n = x.n
    x.n + y.s = since z and y have the same tzinfo member,
y.s = z.s by #2
x.n + z.s
Plugging that back into [6] gives
diff =
x.n - ((x.n + z.s) - z.o) = expanding
x.n - x.n - z.s + z.o = cancelling
- z.s + z.o = by #2
z.d
So diff = z.d.
If [5] is true now, diff = 0, so z.d = 0 too, and we have the standard-time
spelling we wanted in the endcase described above. We're done. Contrarily,
if z.d = 0, then we have a UTC equivalent, and are also done.
If [5] is not true now, diff = z.d != 0, and z.d is the offset we need to
add to z (in effect, z is in tz's standard time, and we need to shift the
local clock into tz's daylight time).
Let
z' = z + z.d = z + diff [7]
and we can again ask whether
z'.n - z'.o = x.n [8]
If so, we're done. If not, the tzinfo class is insane, according to the
assumptions we've made. This also requires a bit of proof. As before, let's
compute the difference between the LHS and RHS of [8] (and skipping some of
the justifications for the kinds of substitutions we've done several times
already):
diff' = x.n - (z'.n - z'.o) = replacing z'.n via [7]
x.n - (z.n + diff - z'.o) = replacing diff via [6]
x.n - (z.n + x.n - (z.n - z.o) - z'.o) =
x.n - z.n - x.n + z.n - z.o + z'.o = cancel x.n
- z.n + z.n - z.o + z'.o = cancel z.n
- z.o + z'.o = #1 twice
-z.s - z.d + z'.s + z'.d = z and z' have same tzinfo
z'.d - z.d
So z' is UTC-equivalent to x iff z'.d = z.d at this point. If they are equal,
we've found the UTC-equivalent so are done. In fact, we stop with [7] and
return z', not bothering to compute z'.d.
How could z.d and z'.d differ? z' = z + z.d [7], so merely moving z' by
a dst() offset, and starting *from* a time already in DST (we know z.d != 0),
would have to change the result dst() returns: we start in DST, and moving
a little further into it takes us out of DST.
There isn't a sane case where this can happen. The closest it gets is at
the end of DST, where there's an hour in UTC with no spelling in a hybrid
tzinfo class. In US Eastern, that's 5:MM UTC = 0:MM EST = 1:MM EDT. During
that hour, on an Eastern clock 1:MM is taken as being in standard time (6:MM
UTC) because the docs insist on that, but 0:MM is taken as being in daylight
time (4:MM UTC). There is no local time mapping to 5:MM UTC. The local
clock jumps from 1:59 back to 1:00 again, and repeats the 1:MM hour in
standard time. Since that's what the local clock *does*, we want to map both
UTC hours 5:MM and 6:MM to 1:MM Eastern. The result is ambiguous
in local time, but so it goes -- it's the way the local clock works.
When x = 5:MM UTC is the input to this algorithm, x.o=0, y.o=-5 and y.d=0,
so z=0:MM. z.d=60 (minutes) then, so [5] doesn't hold and we keep going.
z' = z + z.d = 1:MM then, and z'.d=0, and z'.d - z.d = -60 != 0 so [8]
(correctly) concludes that z' is not UTC-equivalent to x.
Because we know z.d said z was in daylight time (else [5] would have held and
we would have stopped then), and we know z.d != z'.d (else [8] would have held
and we have stopped then), and there are only 2 possible values dst() can
return in Eastern, it follows that z'.d must be 0 (which it is in the example,
but the reasoning doesn't depend on the example -- it depends on there being
two possible dst() outcomes, one zero and the other non-zero). Therefore
z' must be in standard time, and is the spelling we want in this case.
Note again that z' is not UTC-equivalent as far as the hybrid tzinfo class is
concerned (because it takes z' as being in standard time rather than the
daylight time we intend here), but returning it gives the real-life "local
clock repeats an hour" behavior when mapping the "unspellable" UTC hour into
tz.
When the input is 6:MM, z=1:MM and z.d=0, and we stop at once, again with
the 1:MM standard time spelling we want.
So how can this break? One of the assumptions must be violated. Two
possibilities:
1) [2] effectively says that y.s is invariant across all y belonging to a given
time zone. This isn't true if, for political reasons or continental drift,
a region decides to change its base offset from UTC.
2) There may be versions of "double daylight" time where the tail end of
the analysis gives up a step too early. I haven't thought about that
enough to say.
In any case, it's clear that the default fromutc() is strong enough to handle
"almost all" time zones: so long as the standard offset is invariant, it
doesn't matter if daylight time transition points change from year to year, or
if daylight time is skipped in some years; it doesn't matter how large or
small dst() may get within its bounds; and it doesn't even matter if some
perverse time zone returns a negative dst(). So a breaking case must be
pretty bizarre, and a tzinfo subclass can override fromutc() if it is.
"""
#brython does not have a _datetime, so lets comment this out for now.
#try:
# from _datetime import *
#except ImportError:
# pass
#else:
# # Clean up unused names
# del (_DAYNAMES, _DAYS_BEFORE_MONTH, _DAYS_IN_MONTH,
# _DI100Y, _DI400Y, _DI4Y, _MAXORDINAL, _MONTHNAMES,
# _build_struct_time, _call_tzinfo_method, _check_date_fields,
# _check_time_fields, _check_tzinfo_arg, _check_tzname,
# _check_utc_offset, _cmp, _cmperror, _date_class, _days_before_month,
# _days_before_year, _days_in_month, _format_time, _is_leap,
# _isoweek1monday, _math, _ord2ymd, _time, _time_class, _tzinfo_class,
# _wrap_strftime, _ymd2ord)
# # XXX Since import * above excludes names that start with _,
# # docstring does not get overwritten. In the future, it may be
# # appropriate to maintain a single module level docstring and
# # remove the following line.
# #from _datetime import __doc__
| gpl-3.0 |
agrista/odoo-saas | addons/stock_account/res_config.py | 315 | 2277 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class stock_config_settings(osv.osv_memory):
    """Extends the stock settings wizard with inventory-accounting options."""
    _inherit = 'stock.config.settings'

    _columns = {
        # Adds users to the implied security group, enabling valuation
        # configuration on products and product categories.
        'group_stock_inventory_valuation': fields.boolean("Generate accounting entries per stock movement",
            implied_group='stock_account.group_inventory_valuation',
            help="""Allows to configure inventory valuations on products and product categories."""),
        # module_* convention: checking the box installs the named module.
        'module_stock_invoice_directly': fields.boolean("Create and open the invoice when the user finish a delivery order",
            help='This allows to automatically launch the invoicing wizard if the delivery is '
                 'to be invoiced when you send or deliver goods.\n'
                 '-This installs the module stock_invoice_directly.'),
        'module_stock_landed_costs': fields.boolean("Calculate landed costs on products",
            help="""Install the module that allows to affect landed costs on pickings, and split them onto the different products."""),
    }

    def onchange_landed_costs(self, cr, uid, ids, module_landed_costs, context=None):
        # Landed costs depend on per-move valuation entries, so ticking the
        # landed-costs box forces the valuation group on in the form.
        if module_landed_costs:
            return {'value': {'group_stock_inventory_valuation': True}}
        return {}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
dancingdan/tensorflow | tensorflow/python/kernel_tests/random/multinomial_op_big_test.py | 86 | 3462 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Long tests for Multinomial."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
class MultinomialTest(test.TestCase):
  """Sanity checks that events with tiny probabilities are not over-sampled."""

  def _accumulate_sample_counts(self, logits, seed, num_samples=1000000,
                                num_runs=100):
    """Draw multinomial samples repeatedly and tally draws per class index.

    The three tests below previously duplicated this loop verbatim; it is
    factored out so each test only states its logits, seed and assertion.
    Sampling is split into `num_runs` session runs because drawing all
    1e8 samples at once would not fit in memory (see original comments).

    Args:
      logits: 1-D list of per-class unnormalized log-probabilities.
      seed: op-level seed passed to random_ops.multinomial.
      num_samples: samples drawn per session run.
      num_runs: number of session runs to accumulate.

    Returns:
      Dict mapping class index -> total number of times it was drawn.
    """
    counts_by_indices = {}
    with self.test_session(use_gpu=True) as sess:
      samples = random_ops.multinomial(
          constant_op.constant([logits], dtype=dtypes.float32),
          num_samples=num_samples,
          seed=seed)
      for _ in range(num_runs):
        x = sess.run(samples)
        indices, counts = np.unique(x, return_counts=True)
        for index, count in zip(indices, counts):
          counts_by_indices[index] = counts_by_indices.get(index, 0) + count
    return counts_by_indices

  # check that events with tiny probabilities are not over-sampled
  def testLargeDynamicRange(self):
    random_seed.set_random_seed(10)
    counts_by_indices = self._accumulate_sample_counts([-30, 0], seed=15)
    # With logit gap 30 (prob ~1e-13), all 1e8 draws must land on class 1.
    self.assertEqual(counts_by_indices[1], 100000000)

  def testLargeDynamicRange2(self):
    random_seed.set_random_seed(10)
    # Mirror of the test above with the classes swapped.
    counts_by_indices = self._accumulate_sample_counts([0, -30], seed=15)
    self.assertEqual(counts_by_indices[0], 100000000)

  def testLargeDynamicRange3(self):
    random_seed.set_random_seed(10)
    # here the cpu undersamples and won't pass this test either
    counts_by_indices = self._accumulate_sample_counts([0, -17], seed=22)
    # prob ~4e-8 per draw: class 1 should appear at least once in 1e8 draws.
    self.assertGreater(counts_by_indices[1], 0)
if __name__ == "__main__":
  # Run the test cases when executed directly as a script.
  test.main()
| apache-2.0 |
veger/ansible | lib/ansible/modules/storage/netapp/na_ontap_svm_options.py | 59 | 5200 | #!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
short_description: NetApp ONTAP Modify SVM Options
author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
description:
- Modify ONTAP SVM Options
- Only Options that appear on "vserver options show" can be set
extends_documentation_fragment:
- netapp.na_ontap
module: na_ontap_svm_options
version_added: "2.7"
options:
name:
description:
- Name of the option.
value:
description:
- Value of the option.
- Value must be in quote
vserver:
description:
- The name of the vserver to which this option belongs to.
required: True
'''
EXAMPLES = """
- name: Set SVM Options
na_ontap_svm_options:
vserver: "{{ netapp_vserver_name }}"
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
name: snmp.enable
value: 'on'
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
from ansible.module_utils.netapp_module import NetAppModule
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppONTAPSvnOptions(object):
    """Idempotently set a vserver (SVM) option on an ONTAP system via ZAPI.

    Note: the class name spells 'Svn' (historical typo for 'Svm'); it is
    kept unchanged because main() references it.
    """

    def __init__(self):
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            name=dict(required=False, type="str", default=None),
            value=dict(required=False, type='str', default=None),
            vserver=dict(required=True, type='str')
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        if HAS_NETAPP_LIB is False:
            # fail_json exits the module, so no else is strictly needed,
            # but keeping the branch explicit mirrors the control flow.
            self.module.fail_json(msg="the python NetApp-Lib module is required")
        else:
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])

    def set_options(self):
        """
        Set a specific option
        :return: None
        """
        option_obj = netapp_utils.zapi.NaElement("options-set")
        option_obj.add_new_child('name', self.parameters['name'])
        option_obj.add_new_child('value', self.parameters['value'])
        try:
            # The reply carries no information we use; ignore it.
            self.server.invoke_successfully(option_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg="Error setting options: %s" % to_native(error), exception=traceback.format_exc())

    def list_options(self):
        """
        List all Options on the Vserver
        :return: None
        """
        # NOTE(review): currently unused by apply() and the result is
        # discarded; kept for API compatibility with existing callers.
        option_obj = netapp_utils.zapi.NaElement("options-list-info")
        try:
            self.server.invoke_successfully(option_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg="Error getting options: %s" % to_native(error), exception=traceback.format_exc())

    def is_option_set(self):
        """
        Checks to see if an option is set or not
        :return: If option is set return True, else return False
        """
        option_obj = netapp_utils.zapi.NaElement("options-get-iter")
        options_info = netapp_utils.zapi.NaElement("option-info")
        # Build the query from whichever parameters were supplied.
        if self.parameters.get('name') is not None:
            options_info.add_new_child("name", self.parameters['name'])
        if self.parameters.get('value') is not None:
            options_info.add_new_child("value", self.parameters['value'])
        if self.parameters.get('vserver') is not None:
            options_info.add_new_child("vserver", self.parameters['vserver'])
        query = netapp_utils.zapi.NaElement("query")
        query.add_child_elem(options_info)
        option_obj.add_child_elem(query)
        try:
            result = self.server.invoke_successfully(option_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg="Error finding option: %s" % to_native(error), exception=traceback.format_exc())
        # One or more matching records means the option already holds the
        # requested value.
        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
            return True
        return False

    def apply(self):
        """Set the option only when it does not already hold the value."""
        changed = False
        netapp_utils.ems_log_event("na_ontap_svm_options", self.server)
        is_set = self.is_option_set()
        if not is_set:
            self.set_options()
            changed = True
        self.module.exit_json(changed=changed)
def main():
    """
    Execute action from playbook
    :return: none
    """
    svm_options = NetAppONTAPSvnOptions()
    svm_options.apply()
if __name__ == '__main__':
    # Entry point when the module file is executed by Ansible.
    main()
| gpl-3.0 |
HyperBaton/ansible | lib/ansible/modules/network/dellos9/dellos9_facts.py | 27 | 17598 | #!/usr/bin/python
#
# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
# Copyright (c) 2016 Dell Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: dellos9_facts
version_added: "2.2"
author: "Dhivya P (@dhivyap)"
short_description: Collect facts from remote devices running Dell EMC Networking OS9
description:
- Collects a base set of device facts from a remote device that
is running OS9. This module prepends all of the
base network fact keys with C(ansible_net_<fact>). The facts
module will always collect a base set of facts from the device
and can enable or disable collection of additional facts.
extends_documentation_fragment: dellos9
options:
gather_subset:
description:
- When supplied, this argument will restrict the facts collected
to a given subset. Possible values for this argument include
all, hardware, config, and interfaces. Can specify a list of
values to include a larger subset. Values can also be used
with an initial C(M(!)) to specify that a specific subset should
not be collected.
default: [ '!config' ]
notes:
- This module requires OS9 version 9.10.0.1P13 or above.
- This module requires an increase of the SSH connection rate limit.
Use the following command I(ip ssh connection-rate-limit 60)
to configure the same. This can be also be done with the M(dellos9_config) module.
"""
EXAMPLES = """
# Collect all facts from the device
- dellos9_facts:
gather_subset: all
# Collect only the config and default facts
- dellos9_facts:
gather_subset:
- config
# Do not collect hardware facts
- dellos9_facts:
gather_subset:
- "!hardware"
"""
RETURN = """
ansible_net_gather_subset:
description: The list of fact subsets collected from the device
returned: always
type: list
# default
ansible_net_model:
description: The model name returned from the device
returned: always
type: str
ansible_net_serialnum:
description: The serial number of the remote device
returned: always
type: str
ansible_net_version:
description: The operating system version running on the remote device
returned: always
type: str
ansible_net_hostname:
description: The configured hostname of the device
returned: always
type: str
ansible_net_image:
description: The image file the device is running
returned: always
type: str
# hardware
ansible_net_filesystems:
description: All file system names available on the device
returned: when hardware is configured
type: list
ansible_net_memfree_mb:
description: The available free memory on the remote device in Mb
returned: when hardware is configured
type: int
ansible_net_memtotal_mb:
description: The total memory on the remote device in Mb
returned: when hardware is configured
type: int
# config
ansible_net_config:
description: The current active config from the device
returned: when config is configured
type: str
# interfaces
ansible_net_all_ipv4_addresses:
description: All IPv4 addresses configured on the device
returned: when interfaces is configured
type: list
ansible_net_all_ipv6_addresses:
description: All IPv6 addresses configured on the device
returned: when interfaces is configured
type: list
ansible_net_interfaces:
description: A hash of all interfaces running on the system
returned: when interfaces is configured
type: dict
ansible_net_neighbors:
description: The list of LLDP neighbors from the remote device
returned: when interfaces is configured
type: dict
"""
import re
try:
from itertools import izip
except ImportError:
izip = zip
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.dellos9.dellos9 import run_commands
from ansible.module_utils.network.dellos9.dellos9 import dellos9_argument_spec, check_args
from ansible.module_utils.six import iteritems
class FactsBase(object):
    """Base class for fact collectors: runs COMMANDS and stores the replies."""

    COMMANDS = []

    def __init__(self, module):
        self.module = module      # AnsibleModule used to reach the device
        self.facts = {}           # collected facts, filled in by populate()
        self.responses = None     # raw command output, set by populate()

    def populate(self):
        """Run the subclass command list and remember the raw responses."""
        self.responses = run_commands(self.module, self.COMMANDS, check_rc=False)

    def run(self, cmd):
        """Run a single ad-hoc command and return its output."""
        return run_commands(self.module, cmd, check_rc=False)
class Default(FactsBase):
    """Collects the base facts: version, model, image, serial and hostname."""

    COMMANDS = [
        'show version',
        'show inventory',
        'show running-config | grep hostname'
    ]

    def populate(self):
        super(Default, self).populate()
        version_output = self.responses[0]
        self.facts['version'] = self.parse_version(version_output)
        self.facts['model'] = self.parse_model(version_output)
        self.facts['image'] = self.parse_image(version_output)
        self.facts['serialnum'] = self.parse_serialnum(self.responses[1])
        self.facts['hostname'] = self.parse_hostname(self.responses[2])

    def parse_version(self, data):
        match = re.search(r'Software Version:\s*(.+)', data)
        return match.group(1) if match else None

    def parse_hostname(self, data):
        match = re.search(r'^hostname (.+)', data, re.M)
        return match.group(1) if match else None

    def parse_model(self, data):
        match = re.search(r'^System Type:\s*(.+)', data, re.M)
        return match.group(1) if match else None

    def parse_image(self, data):
        match = re.search(r'image file is "(.+)"', data)
        return match.group(1) if match else None

    def parse_serialnum(self, data):
        # The serial number is the third column of the line marked with '*'.
        for line in data.split('\n'):
            if not line.startswith('*'):
                continue
            match = re.search(
                r'\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)', line, re.M)
            if match:
                return match.group(3)
class Hardware(FactsBase):
    """Collects filesystem names and memory figures."""

    COMMANDS = [
        'show file-systems',
        'show memory | except Processor'
    ]

    def populate(self):
        super(Hardware, self).populate()
        self.facts['filesystems'] = self.parse_filesystems(self.responses[0])

        # Indices 0 and 2 of the numeric columns feed memtotal/memfree;
        # values are divided by 1024 to report megabytes.
        mem_values = re.findall(r'\s(\d+)\s', self.responses[1])
        if mem_values:
            self.facts['memtotal_mb'] = int(mem_values[0]) // 1024
            self.facts['memfree_mb'] = int(mem_values[2]) // 1024

    def parse_filesystems(self, data):
        """Return the filesystem names (tokens ending with ':')."""
        return re.findall(r'\s(\S+):$', data, re.M)
class Config(FactsBase):
    """Captures the full running configuration as a single fact."""

    COMMANDS = ['show running-config']

    def populate(self):
        super(Config, self).populate()
        # The only response is the raw running-config text.
        self.facts['config'] = self.responses[0]
class Interfaces(FactsBase):
COMMANDS = [
'show interfaces',
'show ipv6 interface',
'show lldp neighbors detail',
'show inventory'
]
    def populate(self):
        """Gather interface, IPv6 and (when LLDP is enabled) neighbor facts."""
        super(Interfaces, self).populate()

        self.facts['all_ipv4_addresses'] = list()
        self.facts['all_ipv6_addresses'] = list()

        data = self.responses[0]
        interfaces = self.parse_interfaces(data)

        # Management interfaces use a different 'show interfaces' layout,
        # so their raw text is re-split with a dedicated helper.
        for key in list(interfaces.keys()):
            if "ManagementEthernet" in key:
                temp_parsed = interfaces[key]
                del interfaces[key]
                interfaces.update(self.parse_mgmt_interfaces(temp_parsed))

        # Same treatment for VLAN interfaces.
        for key in list(interfaces.keys()):
            if "Vlan" in key:
                temp_parsed = interfaces[key]
                del interfaces[key]
                interfaces.update(self.parse_vlan_interfaces(temp_parsed))

        self.facts['interfaces'] = self.populate_interfaces(interfaces)

        data = self.responses[1]
        if len(data) > 0:
            data = self.parse_ipv6_interfaces(data)
            self.populate_ipv6_interfaces(data)

        data = self.responses[3]
        # Only parse neighbor output when LLDP is actually running,
        # as reported by 'show inventory'.
        if 'LLDP' in self.get_protocol_list(data):
            neighbors = self.responses[2]
            self.facts['neighbors'] = self.parse_neighbors(neighbors)
def get_protocol_list(self, data):
start = False
protocol_list = list()
for line in data.split('\n'):
match = re.search(r'Software Protocol Configured\s*', line)
if match:
start = True
continue
if start:
line = line.strip()
if line.isalnum():
protocol_list.append(line)
return protocol_list
def populate_interfaces(self, interfaces):
facts = dict()
for key, value in interfaces.items():
intf = dict()
intf['description'] = self.parse_description(value)
intf['macaddress'] = self.parse_macaddress(value)
ipv4 = self.parse_ipv4(value)
intf['ipv4'] = self.parse_ipv4(value)
if ipv4:
self.add_ip_address(ipv4['address'], 'ipv4')
intf['mtu'] = self.parse_mtu(value)
intf['bandwidth'] = self.parse_bandwidth(value)
intf['mediatype'] = self.parse_mediatype(value)
intf['duplex'] = self.parse_duplex(value)
intf['lineprotocol'] = self.parse_lineprotocol(value)
intf['operstatus'] = self.parse_operstatus(value)
intf['type'] = self.parse_type(value)
facts[key] = intf
return facts
def populate_ipv6_interfaces(self, data):
for key, value in data.items():
if key in self.facts['interfaces']:
self.facts['interfaces'][key]['ipv6'] = list()
addresses = re.findall(r'\s+(.+), subnet', value, re.M)
subnets = re.findall(r', subnet is (\S+)', value, re.M)
for addr, subnet in izip(addresses, subnets):
ipv6 = dict(address=addr.strip(), subnet=subnet.strip())
self.add_ip_address(addr.strip(), 'ipv6')
self.facts['interfaces'][key]['ipv6'].append(ipv6)
def add_ip_address(self, address, family):
if family == 'ipv4':
self.facts['all_ipv4_addresses'].append(address)
else:
self.facts['all_ipv6_addresses'].append(address)
def parse_neighbors(self, neighbors):
facts = dict()
for entry in neighbors.split(
'========================================================================'):
if entry == '':
continue
intf = self.parse_lldp_intf(entry)
if intf not in facts:
facts[intf] = list()
fact = dict()
fact['host'] = self.parse_lldp_host(entry)
fact['port'] = self.parse_lldp_port(entry)
facts[intf].append(fact)
return facts
def parse_interfaces(self, data):
parsed = dict()
newline_count = 0
interface_start = True
for line in data.split('\n'):
if interface_start:
newline_count = 0
if len(line) == 0:
newline_count += 1
if newline_count == 2:
interface_start = True
continue
else:
match = re.match(r'^(\S+) (\S+)', line)
if match and interface_start:
interface_start = False
key = match.group(0)
parsed[key] = line
else:
parsed[key] += '\n%s' % line
return parsed
def parse_mgmt_interfaces(self, data):
parsed = dict()
interface_start = True
for line in data.split('\n'):
match = re.match(r'^(\S+) (\S+)', line)
if "Time since" in line:
interface_start = True
parsed[key] += '\n%s' % line
continue
elif match and interface_start:
interface_start = False
key = match.group(0)
parsed[key] = line
else:
parsed[key] += '\n%s' % line
return parsed
def parse_vlan_interfaces(self, data):
parsed = dict()
interface_start = True
line_before_end = False
for line in data.split('\n'):
match = re.match(r'^(\S+) (\S+)', line)
match_endline = re.match(r'^\s*\d+ packets, \d+ bytes$', line)
if "Output Statistics" in line:
line_before_end = True
parsed[key] += '\n%s' % line
elif match_endline and line_before_end:
line_before_end = False
interface_start = True
parsed[key] += '\n%s' % line
elif match and interface_start:
interface_start = False
key = match.group(0)
parsed[key] = line
else:
parsed[key] += '\n%s' % line
return parsed
def parse_ipv6_interfaces(self, data):
parsed = dict()
for line in data.split('\n'):
if len(line) == 0:
continue
elif line[0] == ' ':
parsed[key] += '\n%s' % line
else:
match = re.match(r'^(\S+) (\S+)', line)
if match:
key = match.group(0)
parsed[key] = line
return parsed
def parse_description(self, data):
match = re.search(r'Description: (.+)$', data, re.M)
if match:
return match.group(1)
def parse_macaddress(self, data):
match = re.search(r'address is (\S+)', data)
if match:
if match.group(1) != "not":
return match.group(1)
def parse_ipv4(self, data):
match = re.search(r'Internet address is (\S+)', data)
if match:
if match.group(1) != "not":
addr, masklen = match.group(1).split('/')
return dict(address=addr, masklen=int(masklen))
def parse_mtu(self, data):
match = re.search(r'MTU (\d+)', data)
if match:
return int(match.group(1))
def parse_bandwidth(self, data):
match = re.search(r'LineSpeed (\d+)', data)
if match:
return int(match.group(1))
def parse_duplex(self, data):
match = re.search(r'(\w+) duplex', data, re.M)
if match:
return match.group(1)
def parse_mediatype(self, data):
media = re.search(r'(.+) media present, (.+)', data, re.M)
if media:
match = re.search(r'type is (.+)$', media.group(0), re.M)
return match.group(1)
def parse_type(self, data):
match = re.search(r'Hardware is (.+),', data, re.M)
if match:
return match.group(1)
def parse_lineprotocol(self, data):
match = re.search(r'line protocol is (\w+[ ]?\w*)\(?.*\)?$', data, re.M)
if match:
return match.group(1)
def parse_operstatus(self, data):
match = re.search(r'^(?:.+) is (.+),', data, re.M)
if match:
return match.group(1)
def parse_lldp_intf(self, data):
match = re.search(r'^\sLocal Interface (\S+\s\S+)', data, re.M)
if match:
return match.group(1)
def parse_lldp_host(self, data):
match = re.search(r'Remote System Name: (.+)$', data, re.M)
if match:
return match.group(1)
def parse_lldp_port(self, data):
match = re.search(r'Remote Port ID: (.+)$', data, re.M)
if match:
return match.group(1)
# Maps each gather_subset name to the FactsBase subclass that collects it.
FACT_SUBSETS = dict(
    default=Default,
    hardware=Hardware,
    interfaces=Interfaces,
    config=Config,
)

# The set of legal values for the module's gather_subset parameter.
VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
def main():
    """main entry point for module execution

    Resolves the requested fact subsets, runs each collector's
    populate(), and returns all facts prefixed with 'ansible_net_'.
    """
    argument_spec = dict(
        gather_subset=dict(default=['!config'], type='list')
    )

    argument_spec.update(dellos9_argument_spec)

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    gather_subset = module.params['gather_subset']

    runable_subsets = set()
    exclude_subsets = set()

    # Plain names are included, '!name' entries are excluded, and
    # 'all' / '!all' expand to every known subset.
    for subset in gather_subset:
        if subset == 'all':
            runable_subsets.update(VALID_SUBSETS)
            continue

        if subset.startswith('!'):
            subset = subset[1:]
            if subset == 'all':
                exclude_subsets.update(VALID_SUBSETS)
                continue
            exclude = True
        else:
            exclude = False

        if subset not in VALID_SUBSETS:
            module.fail_json(msg='Bad subset')

        if exclude:
            exclude_subsets.add(subset)
        else:
            runable_subsets.add(subset)

    # No explicit includes means "everything"; exclusions still apply,
    # and 'default' facts are always collected.
    if not runable_subsets:
        runable_subsets.update(VALID_SUBSETS)

    runable_subsets.difference_update(exclude_subsets)
    runable_subsets.add('default')

    facts = dict()
    facts['gather_subset'] = list(runable_subsets)

    instances = list()
    for key in runable_subsets:
        instances.append(FACT_SUBSETS[key](module))

    for inst in instances:
        inst.populate()
        facts.update(inst.facts)

    # Prefix every fact key the way Ansible network_facts modules expect.
    ansible_facts = dict()
    for key, value in iteritems(facts):
        key = 'ansible_net_%s' % key
        ansible_facts[key] = value

    warnings = list()
    check_args(module, warnings)

    module.exit_json(ansible_facts=ansible_facts, warnings=warnings)


if __name__ == '__main__':
    main()
| gpl-3.0 |
dcos/dcos | packages/dcos-integration-test/extra/test_etcd.py | 1 | 1236 | import json
import uuid
import pytest
from dcos_test_utils.enterprise import EnterpriseApiSession
from dcos_test_utils.etcd import EtcdCtl, is_enterprise
class TestEtcdctlOpen:
    """Exercises etcdctl against an open-source (non-Enterprise) DC/OS
    cluster; both tests are skipped on Enterprise DC/OS."""

    def test_fetching_members(self, dcos_api_session: EnterpriseApiSession) -> None:
        """The etcd member list must contain one member per master node."""
        if is_enterprise:
            pytest.skip("not suitable for Enterprise DC/OS")
        ctl = EtcdCtl()
        listing = ctl.run(["member", "list", "-w", "json"], check=True)
        cluster = json.loads(listing.stdout.strip())
        assert len(cluster["members"]) == len(dcos_api_session.masters)

    def test_write_and_read(self) -> None:
        """A value written under a unique key must read back unchanged."""
        if is_enterprise:
            pytest.skip("not suitable for Enterprise DC/OS")
        key = "/int-testing/foo-{}".format(uuid.uuid4())
        value = str(uuid.uuid4())
        ctl = EtcdCtl()
        ctl.run(["put", key, value], check=True, env={"ETCDCTL_API": "3"})
        fetched = ctl.run(["get", "--print-value-only=true", key],
                          check=True, env={"ETCDCTL_API": "3"})
        assert value == fetched.stdout.decode('ascii').strip()
| apache-2.0 |
hyperized/ansible | lib/ansible/modules/network/aci/mso_schema_template.py | 25 | 6776 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Dag Wieers (@dagwieers) <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: mso_schema_template
short_description: Manage templates in schemas
description:
- Manage templates on Cisco ACI Multi-Site.
author:
- Dag Wieers (@dagwieers)
version_added: '2.8'
options:
tenant:
description:
- The tenant used for this template.
type: str
required: yes
schema:
description:
- The name of the schema.
type: str
required: yes
template:
description:
- The name of the template.
type: str
aliases: [ name ]
display_name:
description:
- The name as displayed on the MSO web interface.
type: str
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
notes:
- Due to restrictions of the MSO REST API this module creates schemas when needed, and removes them when the last template has been removed.
seealso:
- module: mso_schema
- module: mso_schema_site
extends_documentation_fragment: mso
'''
EXAMPLES = r'''
- name: Add a new template to a schema
mso_schema_template:
host: mso_host
username: admin
password: SomeSecretPassword
tenant: Tenant 1
schema: Schema 1
template: Template 1
state: present
delegate_to: localhost
- name: Remove a template from a schema
mso_schema_template:
host: mso_host
username: admin
password: SomeSecretPassword
tenant: Tenant 1
schema: Schema 1
template: Template 1
state: absent
delegate_to: localhost
- name: Query a template
mso_schema_template:
host: mso_host
username: admin
password: SomeSecretPassword
tenant: Tenant 1
schema: Schema 1
template: Template 1
state: query
delegate_to: localhost
register: query_result
- name: Query all templates
mso_schema_template:
host: mso_host
username: admin
password: SomeSecretPassword
tenant: Tenant 1
schema: Schema 1
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aci.mso import MSOModule, mso_argument_spec
def main():
    """Create, update, remove or query a template in an MSO schema.

    Because the MSO REST API has no standalone template resource, the
    schema itself is created when the first template is added and
    deleted when the last template is removed.
    """
    argument_spec = mso_argument_spec()
    argument_spec.update(
        tenant=dict(type='str', required=True),
        schema=dict(type='str', required=True),
        template=dict(type='str', aliases=['name']),
        display_name=dict(type='str'),
        state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=[
            ['state', 'absent', ['template']],
            ['state', 'present', ['template']],
        ],
    )

    tenant = module.params['tenant']
    schema = module.params['schema']
    template = module.params['template']
    display_name = module.params['display_name']
    state = module.params['state']

    mso = MSOModule(module)

    # Get schema
    schema_obj = mso.get_obj('schemas', displayName=schema)

    mso.existing = {}
    if schema_obj:
        # Schema exists
        schema_path = 'schemas/{id}'.format(**schema_obj)

        # Get template
        templates = [t['name'] for t in schema_obj['templates']]
        if template:
            if template in templates:
                template_idx = templates.index(template)
                mso.existing = schema_obj['templates'][template_idx]
        else:
            # No template given: report all templates of the schema.
            mso.existing = schema_obj['templates']
    else:
        schema_path = 'schemas'

    if state == 'query':
        if not mso.existing:
            if template:
                mso.fail_json(msg="Template '{0}' not found".format(template))
            else:
                mso.existing = []
        mso.exit_json()

    template_path = '/templates/{0}'.format(template)
    ops = []

    mso.previous = mso.existing
    if state == 'absent':
        mso.proposed = mso.sent = {}

        if not schema_obj:
            # There was no schema to begin with
            pass
        elif len(templates) == 1:
            # There is only one template left, so remove the whole schema
            # (the API does not keep empty schemas around).
            mso.existing = {}
            if not module.check_mode:
                mso.request(schema_path, method='DELETE')
        elif mso.existing:
            # Remove existing template
            mso.existing = {}
            ops.append(dict(op='remove', path=template_path))
        else:
            # There was no template to begin with
            pass

    elif state == 'present':
        tenant_id = mso.lookup_tenant(tenant)

        if display_name is None:
            display_name = mso.existing.get('displayName', template)

        if not schema_obj:
            # Schema does not exist, so we have to create it
            payload = dict(
                displayName=schema,
                templates=[dict(
                    name=template,
                    displayName=display_name,
                    tenantId=tenant_id,
                )],
                sites=[],
            )

            mso.existing = payload['templates'][0]

            if not module.check_mode:
                mso.request(schema_path, method='POST', data=payload)

        elif mso.existing:
            # Template exists, so we have to update it
            payload = dict(
                name=template,
                displayName=display_name,
                tenantId=tenant_id,
            )

            mso.sanitize(payload, collate=True)

            ops.append(dict(op='replace', path=template_path + '/displayName', value=display_name))
            ops.append(dict(op='replace', path=template_path + '/tenantId', value=tenant_id))

            mso.existing = mso.proposed

        else:
            # Template does not exist, so we have to add it
            payload = dict(
                name=template,
                displayName=display_name,
                tenantId=tenant_id,
            )

            mso.sanitize(payload, collate=True)

            ops.append(dict(op='add', path='/templates/-', value=payload))

            mso.existing = mso.proposed

    # Apply any accumulated JSON-patch operations (template add/update/
    # remove).  NOTE(review): in the schema POST/DELETE paths above this
    # fires with an empty ops list — verify against the MSO API that an
    # empty PATCH is harmless.
    if not module.check_mode:
        mso.request(schema_path, method='PATCH', data=ops)

    mso.exit_json()


if __name__ == "__main__":
    main()
| gpl-3.0 |
tiagochiavericosta/edx-platform | common/djangoapps/student/migrations/0003_auto__add_usertestgroup.py | 188 | 8942 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds the UserTestGroup model and its
    many-to-many join table to users."""

    def forwards(self, orm):
        # Adding model 'UserTestGroup'
        db.create_table('student_usertestgroup', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=32, db_index=True)),
            ('description', self.gf('django.db.models.fields.TextField')(blank=True)),
        ))
        db.send_create_signal('student', ['UserTestGroup'])

        # Adding M2M table for field users on 'UserTestGroup'
        db.create_table('student_usertestgroup_users', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('usertestgroup', models.ForeignKey(orm['student.usertestgroup'], null=False)),
            ('user', models.ForeignKey(orm['auth.user'], null=False))
        ))
        db.create_unique('student_usertestgroup_users', ['usertestgroup_id', 'user_id'])

    def backwards(self, orm):
        # Deleting model 'UserTestGroup'
        db.delete_table('student_usertestgroup')

        # Removing M2M table for field users on 'UserTestGroup'
        db.delete_table('student_usertestgroup_users')

    # Frozen ORM state that South uses to build the `orm` argument above;
    # this data is never executed directly against the database.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
            'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
            'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
            'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
            'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
            'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'student.registration': {
            'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"},
            'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
        },
        'student.userprofile': {
            'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"},
            'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'meta': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
        },
        'student.usertestgroup': {
            'Meta': {'object_name': 'UserTestGroup'},
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'db_index': 'True', 'symmetrical': 'False'})
        }
    }

    complete_apps = ['student']
| agpl-3.0 |
2014c2g14/2014c2 | w2/static/Brython2.0.0-20140209-164925/Lib/xml/sax/_exceptions.py | 359 | 4785 | """Different kinds of SAX Exceptions"""
import sys
if sys.platform[:4] == "java":
from java.lang import Exception
del sys
# ===== SAXEXCEPTION =====
class SAXException(Exception):
    """Base class for XML errors and warnings.

    Carries a human-readable message and, optionally, the lower-level
    exception that caused it.  Handlers that receive a SAXException are
    not required to raise it — they may simply read the information it
    holds.  Subclass to add functionality or localization.
    """

    def __init__(self, msg, exception=None):
        """Create an exception from a required message and an optional
        embedded exception."""
        self._exception = exception
        self._msg = msg
        Exception.__init__(self, msg)

    def getMessage(self):
        """Return the message describing this exception."""
        return self._msg

    def getException(self):
        """Return the embedded exception, or None if there was none."""
        return self._exception

    def __str__(self):
        """The string form is simply the message."""
        return self._msg

    def __getitem__(self, ix):
        """Refuse indexing: Exception historically defined __getitem__,
        so exception[ix] would otherwise 'work' by mistake."""
        raise AttributeError("__getitem__")
# ===== SAXPARSEEXCEPTION =====
class SAXParseException(SAXException):
    """An XML parse error or warning that knows where it happened.

    Combines a message (plus an optional embedded exception, inherited
    from SAXException) with the locator's system id, line number and
    column number.  As with SAXException, handlers may inspect this
    object instead of raising it.
    """

    def __init__(self, msg, exception, locator):
        """Create the exception; *exception* is allowed to be None."""
        SAXException.__init__(self, msg, exception)
        self._locator = locator
        # Snapshot the position immediately: the parser objects behind
        # the locator may already be gone when this exception is caught.
        self._systemId = self._locator.getSystemId()
        self._colnum = self._locator.getColumnNumber()
        self._linenum = self._locator.getLineNumber()

    def getColumnNumber(self):
        """Column number of the end of the text where the exception
        occurred."""
        return self._colnum

    def getLineNumber(self):
        """Line number of the end of the text where the exception
        occurred."""
        return self._linenum

    def getPublicId(self):
        """Public identifier of the entity where the exception occurred."""
        return self._locator.getPublicId()

    def getSystemId(self):
        """System identifier of the entity where the exception occurred."""
        return self._systemId

    def __str__(self):
        """Format as 'systemid:line:column: message', substituting
        placeholders for unknown position components."""
        values = (self.getSystemId(), self.getLineNumber(), self.getColumnNumber())
        fallbacks = ("<unknown>", "?", "?")
        sysid, linenum, colnum = (
            fb if val is None else val for val, fb in zip(values, fallbacks)
        )
        return "%s:%s:%s: %s" % (sysid, linenum, colnum, self._msg)
# ===== SAXNOTRECOGNIZEDEXCEPTION =====
class SAXNotRecognizedException(SAXException):
    """Exception class for an unrecognized identifier.

    An XMLReader will raise this exception when it is confronted with an
    unrecognized feature or property. SAX applications and extensions may
    use this class for similar purposes."""
# ===== SAXNOTSUPPORTEDEXCEPTION =====
class SAXNotSupportedException(SAXException):
    """Exception class for an unsupported operation.

    An XMLReader will raise this exception when a service it cannot
    perform is requested (specifically setting a state or value). SAX
    applications and extensions may use this class for similar
    purposes."""
# ===== SAXREADERNOTAVAILABLE =====
class SAXReaderNotAvailable(SAXNotSupportedException):
    """Exception class for a missing driver.

    An XMLReader module (driver) should raise this exception when it
    is first imported, e.g. when a support module cannot be imported.
    It also may be raised during parsing, e.g. if executing an external
    program is not permitted."""
| gpl-2.0 |
wildjan/Flask | Work/TriviaMVA/TriviaMVA/env/Lib/site-packages/pip/_vendor/requests/packages/chardet/chardetect.py | 743 | 1141 | #!/usr/bin/env python
"""
Script which takes one or more file paths and reports on their detected
encodings
Example::
% chardetect somefile someotherfile
somefile: windows-1252 with confidence 0.5
someotherfile: ascii with confidence 1.0
If no paths are provided, it takes its input from stdin.
"""
from io import open
from sys import argv, stdin
from chardet.universaldetector import UniversalDetector
def description_of(file, name='stdin'):
    """Return a string describing the probable encoding of a file."""
    detector = UniversalDetector()
    for chunk in file:
        detector.feed(chunk)
    detector.close()
    result = detector.result
    if not result['encoding']:
        return '%s: no result' % name
    return '%s: %s with confidence %s' % (name,
                                          result['encoding'],
                                          result['confidence'])
def main():
    """Report the detected encoding of each path in argv, or of stdin
    when no paths are given.

    Fix: files below are opened in binary mode, so
    UniversalDetector.feed() receives bytes — but on Python 3 iterating
    ``stdin`` yields text lines.  Use the underlying binary buffer when
    it exists (``getattr`` keeps Python 2 working, where stdin has no
    ``buffer`` attribute).
    """
    if len(argv) <= 1:
        print(description_of(getattr(stdin, 'buffer', stdin)))
    else:
        for path in argv[1:]:
            # Binary mode: detection must see the raw, undecoded bytes.
            with open(path, 'rb') as f:
                print(description_of(f, path))


if __name__ == '__main__':
    main()
| apache-2.0 |
aleksandra-tarkowska/django | django/db/migrations/autodetector.py | 1 | 45238 | from __future__ import unicode_literals
import re
import datetime
from django.utils import six
from django.db import models
from django.conf import settings
from django.db.migrations import operations
from django.db.migrations.migration import Migration
from django.db.migrations.questioner import MigrationQuestioner
from django.db.migrations.optimizer import MigrationOptimizer
class MigrationAutodetector(object):
"""
Takes a pair of ProjectStates, and compares them to see what the
first would need doing to make it match the second (the second
usually being the project's current state).
Note that this naturally operates on entire projects at a time,
as it's likely that changes interact (for example, you can't
add a ForeignKey without having a migration to add the table it
depends on first). A user interface may offer single-app usage
if it wishes, with the caveat that it may not always be possible.
"""
# Model options we want to compare and preserve in an AlterModelOptions op
ALTER_OPTION_KEYS = [
"get_latest_by",
"ordering",
"permissions",
"default_permissions",
"select_on_save",
"verbose_name",
"verbose_name_plural",
]
def __init__(self, from_state, to_state, questioner=None):
self.from_state = from_state
self.to_state = to_state
self.questioner = questioner or MigrationQuestioner()
def changes(self, graph, trim_to_apps=None, convert_apps=None):
"""
Main entry point to produce a list of appliable changes.
Takes a graph to base names on and an optional set of apps
to try and restrict to (restriction is not guaranteed)
"""
changes = self._detect_changes(convert_apps)
changes = self.arrange_for_graph(changes, graph)
if trim_to_apps:
changes = self._trim_to_apps(changes, trim_to_apps)
return changes
def deep_deconstruct(self, obj):
"""
Recursive deconstruction for a field and its arguments.
Used for full comparison for rename/alter; sometimes a single-level
deconstruction will not compare correctly.
"""
if not hasattr(obj, 'deconstruct'):
return obj
deconstructed = obj.deconstruct()
if isinstance(obj, models.Field):
# we have a field which also returns a name
deconstructed = deconstructed[1:]
path, args, kwargs = deconstructed
return (
path,
[self.deep_deconstruct(value) for value in args],
dict(
(key, self.deep_deconstruct(value))
for key, value in kwargs.items()
),
)
def only_relation_agnostic_fields(self, fields):
"""
Return a definition of the fields that ignores field names and
what related fields actually relate to.
Used for detecting renames (as, of course, the related fields
change during renames)
"""
fields_def = []
for name, field in fields:
deconstruction = self.deep_deconstruct(field)
if field.rel and field.rel.to:
del deconstruction[2]['to']
fields_def.append(deconstruction)
return fields_def
    def _detect_changes(self, convert_apps=None):
        """
        Returns a dict of migration plans which will achieve the
        change from from_state to to_state. The dict has app labels
        as keys and a list of migrations as values.
        The resulting migrations aren't specially named, but the names
        do matter for dependencies inside the set.

        ``convert_apps`` is an optional collection of app labels that
        should have operations generated even though they appear in
        ``from_state.real_apps`` (i.e. apps being converted to
        migrations).
        """
        # The first phase is generating all the operations for each app
        # and gathering them into a big per-app list.
        # We'll then go through that list later and order it and split
        # into migrations to resolve dependencies caused by M2Ms and FKs.
        self.generated_operations = {}
        # Prepare some old/new state and model lists, separating
        # proxy models and ignoring unmigrated apps.
        self.old_apps = self.from_state.render(ignore_swappable=True)
        self.new_apps = self.to_state.render()
        self.old_model_keys = []
        self.old_proxy_keys = []
        self.new_model_keys = []
        self.new_proxy_keys = []
        for al, mn in sorted(self.from_state.models.keys()):
            model = self.old_apps.get_model(al, mn)
            if model._meta.managed and al not in self.from_state.real_apps:
                if model._meta.proxy:
                    self.old_proxy_keys.append((al, mn))
                else:
                    self.old_model_keys.append((al, mn))
        for al, mn in sorted(self.to_state.models.keys()):
            model = self.new_apps.get_model(al, mn)
            if model._meta.managed and (
                al not in self.from_state.real_apps or
                (convert_apps and al in convert_apps)
            ):
                if model._meta.proxy:
                    self.new_proxy_keys.append((al, mn))
                else:
                    self.new_model_keys.append((al, mn))
        # Renames have to come first
        self.generate_renamed_models()
        # Prepare field lists, and prepare a list of the fields that used
        # through models in the old state so we can make dependencies
        # from the through model deletion to the field that uses it.
        self.kept_model_keys = set(self.old_model_keys).intersection(self.new_model_keys)
        self.through_users = {}
        self.old_field_keys = set()
        self.new_field_keys = set()
        for app_label, model_name in sorted(self.kept_model_keys):
            old_model_name = self.renamed_models.get((app_label, model_name), model_name)
            old_model_state = self.from_state.models[app_label, old_model_name]
            new_model_state = self.to_state.models[app_label, model_name]
            self.old_field_keys.update((app_label, model_name, x) for x, y in old_model_state.fields)
            self.new_field_keys.update((app_label, model_name, x) for x, y in new_model_state.fields)
        # Through model map generation
        for app_label, model_name in sorted(self.old_model_keys):
            old_model_name = self.renamed_models.get((app_label, model_name), model_name)
            old_model_state = self.from_state.models[app_label, old_model_name]
            for field_name, field in old_model_state.fields:
                old_field = self.old_apps.get_model(app_label, old_model_name)._meta.get_field_by_name(field_name)[0]
                if hasattr(old_field, "rel") and getattr(old_field.rel, "through", None) and not old_field.rel.through._meta.auto_created:
                    through_key = (
                        old_field.rel.through._meta.app_label,
                        old_field.rel.through._meta.object_name.lower(),
                    )
                    self.through_users[through_key] = (app_label, old_model_name, field_name)
        # Generate non-rename model operations
        self.generate_created_models()
        self.generate_deleted_models()
        self.generate_created_proxies()
        self.generate_deleted_proxies()
        self.generate_altered_options()
        # Generate field operations
        self.generate_added_fields()
        self.generate_removed_fields()
        self.generate_altered_fields()
        self.generate_altered_unique_together()
        self.generate_altered_index_together()
        self.generate_altered_order_with_respect_to()
        # Now, reordering to make things possible. The order we have already
        # isn't bad, but we need to pull a few things around so FKs work nicely
        # inside the same app
        for app_label, ops in sorted(self.generated_operations.items()):
            # Repeatedly move an operation after the last same-app
            # operation it depends on; the 10000-pass cap turns a
            # dependency cycle into a hard error instead of a hang.
            for i in range(10000):
                found = False
                for i, op in enumerate(ops):
                    for dep in op._auto_deps:
                        if dep[0] == app_label:
                            # Alright, there's a dependency on the same app.
                            for j, op2 in enumerate(ops):
                                if self.check_dependency(op2, dep) and j > i:
                                    ops = ops[:i] + ops[i + 1:j + 1] + [op] + ops[j + 1:]
                                    found = True
                                    break
                        if found:
                            break
                    if found:
                        break
                if not found:
                    break
            else:
                raise ValueError("Infinite loop caught in operation dependency resolution")
            self.generated_operations[app_label] = ops
        # Now, we need to chop the lists of operations up into migrations with
        # dependencies on each other.
        # We do this by stepping up an app's list of operations until we
        # find one that has an outgoing dependency that isn't in another app's
        # migration yet (hasn't been chopped off its list). We then chop off the
        # operations before it into a migration and move onto the next app.
        # If we loop back around without doing anything, there's a circular
        # dependency (which _should_ be impossible as the operations are all
        # split at this point so they can't depend and be depended on)
        self.migrations = {}
        num_ops = sum(len(x) for x in self.generated_operations.values())
        chop_mode = False
        while num_ops:
            # On every iteration, we step through all the apps and see if there
            # is a completed set of operations.
            # If we find that a subset of the operations are complete we can
            # try to chop it off from the rest and continue, but we only
            # do this if we've already been through the list once before
            # without any chopping and nothing has changed.
            for app_label in sorted(self.generated_operations.keys()):
                chopped = []
                dependencies = set()
                for operation in list(self.generated_operations[app_label]):
                    deps_satisfied = True
                    operation_dependencies = set()
                    for dep in operation._auto_deps:
                        if dep[0] == "__setting__":
                            # Dependency on a settings value (swappable
                            # model) rather than on another migration.
                            operation_dependencies.add((dep[0], dep[1]))
                        elif dep[0] != app_label:
                            # External app dependency. See if it's not yet
                            # satisfied.
                            for other_operation in self.generated_operations.get(dep[0], []):
                                if self.check_dependency(other_operation, dep):
                                    deps_satisfied = False
                                    break
                            if not deps_satisfied:
                                break
                            else:
                                if self.migrations.get(dep[0], None):
                                    operation_dependencies.add((dep[0], self.migrations[dep[0]][-1].name))
                                else:
                                    # If we can't find the other app, we add a __first__ dependency,
                                    # but only if we've already been through once and checked everything
                                    if chop_mode:
                                        operation_dependencies.add((dep[0], "__first__"))
                                    else:
                                        deps_satisfied = False
                    if deps_satisfied:
                        chopped.append(operation)
                        dependencies.update(operation_dependencies)
                        self.generated_operations[app_label] = self.generated_operations[app_label][1:]
                    else:
                        break
                # Make a migration! Well, only if there's stuff to put in it
                if dependencies or chopped:
                    if not self.generated_operations[app_label] or chop_mode:
                        # Build an anonymous Migration subclass instance
                        # holding the chopped-off operations.
                        subclass = type(str("Migration"), (Migration,), {"operations": [], "dependencies": []})
                        instance = subclass("auto_%i" % (len(self.migrations.get(app_label, [])) + 1), app_label)
                        instance.dependencies = list(dependencies)
                        instance.operations = chopped
                        self.migrations.setdefault(app_label, []).append(instance)
                        chop_mode = False
                    else:
                        self.generated_operations[app_label] = chopped + self.generated_operations[app_label]
            new_num_ops = sum(len(x) for x in self.generated_operations.values())
            if new_num_ops == num_ops:
                # No progress this pass: allow chopping next time, and if
                # even chop mode made no progress, give up.
                if not chop_mode:
                    chop_mode = True
                else:
                    raise ValueError("Cannot resolve operation dependencies")
            num_ops = new_num_ops
        # OK, add in internal dependencies among the migrations
        for app_label, migrations in self.migrations.items():
            for m1, m2 in zip(migrations, migrations[1:]):
                m2.dependencies.append((app_label, m1.name))
        # De-dupe dependencies
        for app_label, migrations in self.migrations.items():
            for migration in migrations:
                migration.dependencies = list(set(migration.dependencies))
        # Optimize migrations
        for app_label, migrations in self.migrations.items():
            for migration in migrations:
                migration.operations = MigrationOptimizer().optimize(migration.operations, app_label=app_label)
        return self.migrations
def check_dependency(self, operation, dependency):
"""
Checks if an operation dependency matches an operation.
"""
# Created model
if dependency[2] is None and dependency[3] is True:
return (
isinstance(operation, operations.CreateModel) and
operation.name.lower() == dependency[1].lower()
)
# Created field
elif dependency[2] is not None and dependency[3] is True:
return (
(
isinstance(operation, operations.CreateModel) and
operation.name.lower() == dependency[1].lower() and
any(dependency[2] == x for x, y in operation.fields)
) or
(
isinstance(operation, operations.AddField) and
operation.model_name.lower() == dependency[1].lower() and
operation.name.lower() == dependency[2].lower()
)
)
# Removed field
elif dependency[2] is not None and dependency[3] is False:
return (
isinstance(operation, operations.RemoveField) and
operation.model_name.lower() == dependency[1].lower() and
operation.name.lower() == dependency[2].lower()
)
# Removed model
elif dependency[2] is None and dependency[3] is False:
return (
isinstance(operation, operations.DeleteModel) and
operation.name.lower() == dependency[1].lower()
)
# order_with_respect_to being unset for a field
elif dependency[2] is not None and dependency[3] == "order_wrt_unset":
return (
isinstance(operation, operations.AlterOrderWithRespectTo) and
operation.name.lower() == dependency[1].lower() and
(operation.order_with_respect_to or "").lower() != dependency[2].lower()
)
# Unknown dependency. Raise an error.
else:
raise ValueError("Can't handle dependency %r" % (dependency, ))
def add_operation(self, app_label, operation, dependencies=None):
# Dependencies are (app_label, model_name, field_name, create/delete as True/False)
operation._auto_deps = dependencies or []
self.generated_operations.setdefault(app_label, []).append(operation)
def swappable_first_key(self, item):
"""
Sorting key function that places potential swappable models first in
lists of created models (only real way to solve #22783)
"""
try:
model = self.new_apps.get_model(item[0], item[1])
base_names = [base.__name__ for base in model.__bases__]
string_version = "%s.%s" % (item[0], item[1])
if (
model._meta.swappable or
"AbstractUser" in base_names or
"AbstractBaseUser" in base_names or
settings.AUTH_USER_MODEL.lower() == string_version.lower()
):
return ("___" + item[0], "___" + item[1])
except LookupError:
pass
return item
    def generate_renamed_models(self):
        """
        Finds any renamed models, and generates the operations for them,
        and removes the old entry from the model lists.
        Must be run before other model-level generation.
        """
        self.renamed_models = {}
        self.renamed_models_rel = {}
        added_models = set(self.new_model_keys) - set(self.old_model_keys)
        for app_label, model_name in sorted(added_models):
            model_state = self.to_state.models[app_label, model_name]
            model_fields_def = self.only_relation_agnostic_fields(model_state.fields)
            # A rename candidate is a removed model in the same app whose
            # relation-agnostic field definition matches exactly.
            removed_models = set(self.old_model_keys) - set(self.new_model_keys)
            for rem_app_label, rem_model_name in removed_models:
                if rem_app_label == app_label:
                    rem_model_state = self.from_state.models[rem_app_label, rem_model_name]
                    rem_model_fields_def = self.only_relation_agnostic_fields(rem_model_state.fields)
                    if model_fields_def == rem_model_fields_def:
                        # Confirm with the user/questioner before treating
                        # the add+remove pair as a rename.
                        if self.questioner.ask_rename_model(rem_model_state, model_state):
                            self.add_operation(
                                app_label,
                                operations.RenameModel(
                                    old_name=rem_model_state.name,
                                    new_name=model_state.name,
                                )
                            )
                            # Record the rename both by key and by dotted
                            # path (the latter is used to fix up FK targets
                            # during field-rename detection).
                            self.renamed_models[app_label, model_name] = rem_model_name
                            self.renamed_models_rel['%s.%s' % (rem_model_state.app_label, rem_model_state.name)] = '%s.%s' % (model_state.app_label, model_state.name)
                            # Patch the old key list so later phases see the
                            # model as kept rather than added+removed.
                            self.old_model_keys.remove((rem_app_label, rem_model_name))
                            self.old_model_keys.append((app_label, model_name))
                            break
    def generate_created_models(self):
        """
        Find all new models and make creation operations for them,
        and separate operations to create any foreign key or M2M relationships
        (we'll optimise these back in later if we can)
        We also defer any model options that refer to collections of fields
        that might be deferred (e.g. unique_together, index_together)
        """
        added_models = set(self.new_model_keys) - set(self.old_model_keys)
        # Swappable candidates are sorted first; see swappable_first_key.
        for app_label, model_name in sorted(added_models, key=self.swappable_first_key):
            model_state = self.to_state.models[app_label, model_name]
            # Gather related fields
            related_fields = {}
            for field in self.new_apps.get_model(app_label, model_name)._meta.local_fields:
                if field.rel:
                    if field.rel.to:
                        related_fields[field.name] = field
                    # through will be none on M2Ms on swapped-out models;
                    # we can treat lack of through as auto_created=True, though.
                    if getattr(field.rel, "through", None) and not field.rel.through._meta.auto_created:
                        related_fields[field.name] = field
            for field in self.new_apps.get_model(app_label, model_name)._meta.local_many_to_many:
                if field.rel.to:
                    related_fields[field.name] = field
                if getattr(field.rel, "through", None) and not field.rel.through._meta.auto_created:
                    related_fields[field.name] = field
            # Are there unique/index_together to defer?
            # (pop() mutates model_state.options so CreateModel below does
            # not carry these field-referencing options.)
            unique_together = model_state.options.pop('unique_together', None)
            index_together = model_state.options.pop('index_together', None)
            order_with_respect_to = model_state.options.pop('order_with_respect_to', None)
            # Depend on the deletion of any possible proxy version of us
            dependencies = [
                (app_label, model_name, None, False),
            ]
            # Depend on all bases
            for base in model_state.bases:
                if isinstance(base, six.string_types) and "." in base:
                    base_app_label, base_name = base.split(".", 1)
                    dependencies.append((base_app_label, base_name, None, True))
            # Generate creation operation
            self.add_operation(
                app_label,
                operations.CreateModel(
                    name=model_state.name,
                    fields=[d for d in model_state.fields if d[0] not in related_fields],
                    options=model_state.options,
                    bases=model_state.bases,
                ),
                dependencies=dependencies,
            )
            # Generate operations for each related field
            for name, field in sorted(related_fields.items()):
                # Account for FKs to swappable models
                swappable_setting = getattr(field, 'swappable_setting', None)
                if swappable_setting is not None:
                    dep_app_label = "__setting__"
                    dep_object_name = swappable_setting
                else:
                    dep_app_label = field.rel.to._meta.app_label
                    dep_object_name = field.rel.to._meta.object_name
                dependencies = [(dep_app_label, dep_object_name, None, True)]
                if getattr(field.rel, "through", None) and not field.rel.through._meta.auto_created:
                    # Explicit through model must exist before the M2M field.
                    dependencies.append((
                        field.rel.through._meta.app_label,
                        field.rel.through._meta.object_name,
                        None,
                        True
                    ))
                # Depend on our own model being created
                dependencies.append((app_label, model_name, None, True))
                # Make operation
                self.add_operation(
                    app_label,
                    operations.AddField(
                        model_name=model_name,
                        name=name,
                        field=field,
                    ),
                    dependencies=list(set(dependencies)),
                )
            # Generate other opns
            # The deferred field-collection options depend on all related
            # fields (and the model itself) existing first.
            related_dependencies = [
                (app_label, model_name, name, True)
                for name, field in sorted(related_fields.items())
            ]
            related_dependencies.append((app_label, model_name, None, True))
            if unique_together:
                self.add_operation(
                    app_label,
                    operations.AlterUniqueTogether(
                        name=model_name,
                        unique_together=unique_together,
                    ),
                    dependencies=related_dependencies
                )
            if index_together:
                self.add_operation(
                    app_label,
                    operations.AlterIndexTogether(
                        name=model_name,
                        index_together=index_together,
                    ),
                    dependencies=related_dependencies
                )
            if order_with_respect_to:
                self.add_operation(
                    app_label,
                    operations.AlterOrderWithRespectTo(
                        name=model_name,
                        order_with_respect_to=order_with_respect_to,
                    ),
                    dependencies=[
                        (app_label, model_name, order_with_respect_to, True),
                        (app_label, model_name, None, True),
                    ]
                )
def generate_created_proxies(self):
"""
Makes CreateModel statements for proxy models.
We use the same statements as that way there's less code duplication,
but of course for proxy models we can skip all that pointless field
stuff and just chuck out an operation.
"""
added_proxies = set(self.new_proxy_keys) - set(self.old_proxy_keys)
for app_label, model_name in sorted(added_proxies):
model_state = self.to_state.models[app_label, model_name]
assert model_state.options.get("proxy", False)
# Depend on the deletion of any possible non-proxy version of us
dependencies = [
(app_label, model_name, None, False),
]
# Depend on all bases
for base in model_state.bases:
if isinstance(base, six.string_types) and "." in base:
base_app_label, base_name = base.split(".", 1)
dependencies.append((base_app_label, base_name, None, False))
# Generate creation operation
self.add_operation(
app_label,
operations.CreateModel(
name=model_state.name,
fields=[],
options=model_state.options,
bases=model_state.bases,
),
# Depend on the deletion of any possible non-proxy version of us
dependencies=dependencies,
)
    def generate_deleted_models(self):
        """
        Find all deleted models and make creation operations for them,
        and separate operations to delete any foreign key or M2M relationships
        (we'll optimise these back in later if we can)
        We also bring forward removal of any model options that refer to
        collections of fields - the inverse of generate_created_models.
        """
        deleted_models = set(self.old_model_keys) - set(self.new_model_keys)
        for app_label, model_name in sorted(deleted_models):
            model_state = self.from_state.models[app_label, model_name]
            model = self.old_apps.get_model(app_label, model_name)
            # Gather related fields
            related_fields = {}
            for field in model._meta.local_fields:
                if field.rel:
                    if field.rel.to:
                        related_fields[field.name] = field
                    # through will be none on M2Ms on swapped-out models;
                    # we can treat lack of through as auto_created=True, though.
                    if getattr(field.rel, "through", None) and not field.rel.through._meta.auto_created:
                        related_fields[field.name] = field
            for field in model._meta.local_many_to_many:
                if field.rel.to:
                    related_fields[field.name] = field
                if getattr(field.rel, "through", None) and not field.rel.through._meta.auto_created:
                    related_fields[field.name] = field
            # Generate option removal first
            # (field-collection options must be dropped before the fields
            # they reference are removed).
            unique_together = model_state.options.pop('unique_together', None)
            index_together = model_state.options.pop('index_together', None)
            if unique_together:
                self.add_operation(
                    app_label,
                    operations.AlterUniqueTogether(
                        name=model_name,
                        unique_together=None,
                    )
                )
            if index_together:
                self.add_operation(
                    app_label,
                    operations.AlterIndexTogether(
                        name=model_name,
                        index_together=None,
                    )
                )
            # Then remove each related field
            for name, field in sorted(related_fields.items()):
                self.add_operation(
                    app_label,
                    operations.RemoveField(
                        model_name=model_name,
                        name=name,
                    )
                )
            # Finally, remove the model.
            # This depends on both the removal of all incoming fields
            # and the removal of all its own related fields, and if it's
            # a through model the field that references it.
            dependencies = []
            for related_object in model._meta.get_all_related_objects():
                dependencies.append((
                    related_object.model._meta.app_label,
                    related_object.model._meta.object_name,
                    related_object.field.name,
                    False,
                ))
            for related_object in model._meta.get_all_related_many_to_many_objects():
                dependencies.append((
                    related_object.model._meta.app_label,
                    related_object.model._meta.object_name,
                    related_object.field.name,
                    False,
                ))
            for name, field in sorted(related_fields.items()):
                dependencies.append((app_label, model_name, name, False))
            # We're referenced in another field's through=
            # (map built by _detect_changes into self.through_users).
            through_user = self.through_users.get((app_label, model_state.name.lower()), None)
            if through_user:
                dependencies.append((through_user[0], through_user[1], through_user[2], False))
            # Finally, make the operation, deduping any dependencies
            self.add_operation(
                app_label,
                operations.DeleteModel(
                    name=model_state.name,
                ),
                dependencies=list(set(dependencies)),
            )
def generate_deleted_proxies(self):
"""
Makes DeleteModel statements for proxy models.
"""
deleted_proxies = set(self.old_proxy_keys) - set(self.new_proxy_keys)
for app_label, model_name in sorted(deleted_proxies):
model_state = self.from_state.models[app_label, model_name]
assert model_state.options.get("proxy", False)
self.add_operation(
app_label,
operations.DeleteModel(
name=model_state.name,
),
)
    def generate_added_fields(self):
        """Detect added fields, first checking whether each "added"
        field is really a rename of a removed field with an identical
        deep-deconstructed definition."""
        # New fields
        self.renamed_fields = {}
        for app_label, model_name, field_name in sorted(self.new_field_keys - self.old_field_keys):
            old_model_name = self.renamed_models.get((app_label, model_name), model_name)
            old_model_state = self.from_state.models[app_label, old_model_name]
            new_model_state = self.to_state.models[app_label, model_name]
            field = new_model_state.get_field_by_name(field_name)
            # Scan to see if this is actually a rename!
            field_dec = self.deep_deconstruct(field)
            found_rename = False
            for rem_app_label, rem_model_name, rem_field_name in sorted(self.old_field_keys - self.new_field_keys):
                if rem_app_label == app_label and rem_model_name == model_name:
                    old_field_dec = self.deep_deconstruct(old_model_state.get_field_by_name(rem_field_name))
                    # Rewrite the old FK target through any model renames so
                    # the two deconstructions can compare equal.
                    if field.rel and field.rel.to and 'to' in old_field_dec[2]:
                        old_rel_to = old_field_dec[2]['to']
                        if old_rel_to in self.renamed_models_rel:
                            old_field_dec[2]['to'] = self.renamed_models_rel[old_rel_to]
                    if old_field_dec == field_dec:
                        if self.questioner.ask_rename(model_name, rem_field_name, field_name, field):
                            self.add_operation(
                                app_label,
                                operations.RenameField(
                                    model_name=model_name,
                                    old_name=rem_field_name,
                                    new_name=field_name,
                                )
                            )
                            # Patch the key sets so later phases treat the
                            # field as kept, not added+removed.
                            self.old_field_keys.remove((rem_app_label, rem_model_name, rem_field_name))
                            self.old_field_keys.add((app_label, model_name, field_name))
                            self.renamed_fields[app_label, model_name, field_name] = rem_field_name
                            found_rename = True
                            break
            if found_rename:
                continue
            # You can't just add NOT NULL fields with no default
            if not field.null and not field.has_default() and not isinstance(field, models.ManyToManyField):
                # Clone before mutating: the state's field instance must
                # not pick up the one-off default.
                field = field.clone()
                field.default = self.questioner.ask_not_null_addition(field_name, model_name)
                self.add_operation(
                    app_label,
                    operations.AddField(
                        model_name=model_name,
                        name=field_name,
                        field=field,
                        preserve_default=False,
                    )
                )
            else:
                self.add_operation(
                    app_label,
                    operations.AddField(
                        model_name=model_name,
                        name=field_name,
                        field=field,
                    )
                )
def generate_removed_fields(self):
"""
Fields that have been removed.
"""
for app_label, model_name, field_name in sorted(self.old_field_keys - self.new_field_keys):
self.add_operation(
app_label,
operations.RemoveField(
model_name=model_name,
name=field_name,
),
# We might need to depend on the removal of an order_with_respect_to;
# this is safely ignored if there isn't one
dependencies=[(app_label, model_name, field_name, "order_wrt_unset")],
)
    def generate_altered_fields(self):
        """
        Fields that have been altered.
        """
        for app_label, model_name, field_name in sorted(self.old_field_keys.intersection(self.new_field_keys)):
            # Did the field change?
            # Resolve model/field renames so old and new sides line up.
            old_model_name = self.renamed_models.get((app_label, model_name), model_name)
            new_model_state = self.to_state.models[app_label, model_name]
            old_field_name = self.renamed_fields.get((app_label, model_name, field_name), field_name)
            old_field = self.old_apps.get_model(app_label, old_model_name)._meta.get_field_by_name(old_field_name)[0]
            new_field = self.new_apps.get_model(app_label, model_name)._meta.get_field_by_name(field_name)[0]
            # Implement any model renames on relations; these are handled by RenameModel
            # so we need to exclude them from the comparison
            if hasattr(new_field, "rel") and getattr(new_field.rel, "to", None):
                rename_key = (
                    new_field.rel.to._meta.app_label,
                    new_field.rel.to._meta.object_name.lower(),
                )
                if rename_key in self.renamed_models:
                    # Point the new field at the old target so a pure
                    # rename does not register as an alteration.
                    new_field.rel.to = old_field.rel.to
            old_field_dec = self.deep_deconstruct(old_field)
            new_field_dec = self.deep_deconstruct(new_field)
            if old_field_dec != new_field_dec:
                self.add_operation(
                    app_label,
                    operations.AlterField(
                        model_name=model_name,
                        name=field_name,
                        field=new_model_state.get_field_by_name(field_name),
                    )
                )
def _generate_altered_foo_together(self, operation):
option_name = operation.option_name
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
if old_model_state.options.get(option_name) != new_model_state.options.get(option_name):
self.add_operation(
app_label,
operation(
name=model_name,
**{option_name: new_model_state.options.get(option_name)}
)
)
    def generate_altered_unique_together(self):
        # Delegate to the shared helper with the unique_together op class.
        self._generate_altered_foo_together(operations.AlterUniqueTogether)
    def generate_altered_index_together(self):
        # Delegate to the shared helper with the index_together op class.
        self._generate_altered_foo_together(operations.AlterIndexTogether)
def generate_altered_options(self):
"""
Works out if any non-schema-affecting options have changed and
makes an operation to represent them in state changes (in case Python
code in migrations needs them)
"""
models_to_check = self.kept_model_keys.union(set(self.new_proxy_keys).intersection(self.old_proxy_keys))
for app_label, model_name in sorted(models_to_check):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
old_options = dict(
option for option in old_model_state.options.items()
if option[0] in self.ALTER_OPTION_KEYS
)
new_options = dict(
option for option in new_model_state.options.items()
if option[0] in self.ALTER_OPTION_KEYS
)
if old_options != new_options:
self.add_operation(
app_label,
operations.AlterModelOptions(
name=model_name,
options=new_options,
)
)
def generate_altered_order_with_respect_to(self):
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
if old_model_state.options.get("order_with_respect_to", None) != new_model_state.options.get("order_with_respect_to", None):
# Make sure it comes second if we're adding
# (removal dependency is part of RemoveField)
dependencies = []
if new_model_state.options.get("order_with_respect_to", None):
dependencies.append((
app_label,
model_name,
new_model_state.options["order_with_respect_to"],
True,
))
# Actually generate the operation
self.add_operation(
app_label,
operations.AlterOrderWithRespectTo(
name=model_name,
order_with_respect_to=new_model_state.options.get('order_with_respect_to', None),
),
dependencies=dependencies,
)
    def arrange_for_graph(self, changes, graph):
        """
        Takes in a result from changes() and a MigrationGraph,
        and fixes the names and dependencies of the changes so they
        extend the graph from the leaf nodes for each app.
        """
        leaves = graph.leaf_nodes()
        name_map = {}
        for app_label, migrations in list(changes.items()):
            if not migrations:
                continue
            # Find the app label's current leaf node
            app_leaf = None
            for leaf in leaves:
                if leaf[0] == app_label:
                    app_leaf = leaf
                    break
            # Do they want an initial migration for this app?
            if app_leaf is None and not self.questioner.ask_initial(app_label):
                # They don't.
                # Redirect any dependencies on this app to "__first__" and
                # drop its migrations entirely.
                for migration in migrations:
                    name_map[(app_label, migration.name)] = (app_label, "__first__")
                del changes[app_label]
                continue
            # Work out the next number in the sequence
            if app_leaf is None:
                next_number = 1
            else:
                next_number = (self.parse_number(app_leaf[1]) or 0) + 1
            # Name each migration
            for i, migration in enumerate(migrations):
                if i == 0 and app_leaf:
                    # Chain the first new migration onto the existing leaf.
                    migration.dependencies.append(app_leaf)
                if i == 0 and not app_leaf:
                    new_name = "0001_initial"
                else:
                    # Zero-padded number plus a (truncated) suggested name.
                    new_name = "%04i_%s" % (
                        next_number,
                        self.suggest_name(migration.operations)[:100],
                    )
                name_map[(app_label, migration.name)] = (app_label, new_name)
                next_number += 1
                migration.name = new_name
        # Now fix dependencies
        # (rewrite auto_N placeholder names to the final names).
        for app_label, migrations in changes.items():
            for migration in migrations:
                migration.dependencies = [name_map.get(d, d) for d in migration.dependencies]
        return changes
def _trim_to_apps(self, changes, app_labels):
"""
Takes changes from arrange_for_graph and set of app labels and
returns a modified set of changes which trims out as many migrations
that are not in app_labels as possible.
Note that some other migrations may still be present, as they may be
required dependencies.
"""
# Gather other app dependencies in a first pass
app_dependencies = {}
for app_label, migrations in changes.items():
for migration in migrations:
for dep_app_label, name in migration.dependencies:
app_dependencies.setdefault(app_label, set()).add(dep_app_label)
required_apps = set(app_labels)
# Keep resolving till there's no change
old_required_apps = None
while old_required_apps != required_apps:
old_required_apps = set(required_apps)
for app_label in list(required_apps):
required_apps.update(app_dependencies.get(app_label, set()))
# Remove all migrations that aren't needed
for app_label in list(changes.keys()):
if app_label not in required_apps:
del changes[app_label]
return changes
@classmethod
def suggest_name(cls, ops):
"""
Given a set of operations, suggests a name for the migration
they might represent. Names are not guaranteed to be unique,
but we put some effort in to the fallback name to avoid VCS conflicts
if we can.
"""
if len(ops) == 1:
if isinstance(ops[0], operations.CreateModel):
return ops[0].name.lower()
elif isinstance(ops[0], operations.DeleteModel):
return "delete_%s" % ops[0].name.lower()
elif isinstance(ops[0], operations.AddField):
return "%s_%s" % (ops[0].model_name.lower(), ops[0].name.lower())
elif isinstance(ops[0], operations.RemoveField):
return "remove_%s_%s" % (ops[0].model_name.lower(), ops[0].name.lower())
elif len(ops) > 1:
if all(isinstance(o, operations.CreateModel) for o in ops):
return "_".join(sorted(o.name.lower() for o in ops))
return "auto_%s" % datetime.datetime.now().strftime("%Y%m%d_%H%M")
@classmethod
def parse_number(cls, name):
"""
Given a migration name, tries to extract a number from the
beginning of it. If no number found, returns None.
"""
if re.match(r"^\d+_", name):
return int(name.split("_")[0])
return None
| bsd-3-clause |
Immortalin/python-for-android | python3-alpha/python3-src/Lib/distutils/tests/test_check.py | 47 | 3541 | """Tests for distutils.command.check."""
import unittest
from test.support import run_unittest
from distutils.command.check import check, HAS_DOCUTILS
from distutils.tests import support
from distutils.errors import DistutilsSetupError
class CheckTestCase(support.LoggingSilencer,
support.TempdirManager,
unittest.TestCase):
def _run(self, metadata=None, **options):
if metadata is None:
metadata = {}
pkg_info, dist = self.create_dist(**metadata)
cmd = check(dist)
cmd.initialize_options()
for name, value in options.items():
setattr(cmd, name, value)
cmd.ensure_finalized()
cmd.run()
return cmd
def test_check_metadata(self):
# let's run the command with no metadata at all
# by default, check is checking the metadata
# should have some warnings
cmd = self._run()
self.assertEqual(cmd._warnings, 2)
# now let's add the required fields
# and run it again, to make sure we don't get
# any warning anymore
metadata = {'url': 'xxx', 'author': 'xxx',
'author_email': 'xxx',
'name': 'xxx', 'version': 'xxx'}
cmd = self._run(metadata)
self.assertEqual(cmd._warnings, 0)
# now with the strict mode, we should
# get an error if there are missing metadata
self.assertRaises(DistutilsSetupError, self._run, {}, **{'strict': 1})
# and of course, no error when all metadata are present
cmd = self._run(metadata, strict=1)
self.assertEqual(cmd._warnings, 0)
def test_check_document(self):
if not HAS_DOCUTILS: # won't test without docutils
return
pkg_info, dist = self.create_dist()
cmd = check(dist)
# let's see if it detects broken rest
broken_rest = 'title\n===\n\ntest'
msgs = cmd._check_rst_data(broken_rest)
self.assertEqual(len(msgs), 1)
# and non-broken rest
rest = 'title\n=====\n\ntest'
msgs = cmd._check_rst_data(rest)
self.assertEqual(len(msgs), 0)
def test_check_restructuredtext(self):
if not HAS_DOCUTILS: # won't test without docutils
return
# let's see if it detects broken rest in long_description
broken_rest = 'title\n===\n\ntest'
pkg_info, dist = self.create_dist(long_description=broken_rest)
cmd = check(dist)
cmd.check_restructuredtext()
self.assertEqual(cmd._warnings, 1)
# let's see if we have an error with strict=1
metadata = {'url': 'xxx', 'author': 'xxx',
'author_email': 'xxx',
'name': 'xxx', 'version': 'xxx',
'long_description': broken_rest}
self.assertRaises(DistutilsSetupError, self._run, metadata,
**{'strict': 1, 'restructuredtext': 1})
# and non-broken rest
metadata['long_description'] = 'title\n=====\n\ntest'
cmd = self._run(metadata, strict=1, restructuredtext=1)
self.assertEqual(cmd._warnings, 0)
def test_check_all(self):
    """Strict reST checking of an empty metadata set must raise."""
    # NOTE(review): the original bound a partial ``metadata`` dict that was
    # never passed to _run (the call below uses {}); dropped as dead code.
    self.assertRaises(DistutilsSetupError, self._run,
                      {}, strict=1, restructuredtext=1)
def test_suite():
    """Return the unittest suite for the distutils ``check`` command tests."""
    return unittest.makeSuite(CheckTestCase)
if __name__ == "__main__":
    # run_unittest is the distutils/test.support test runner helper,
    # imported elsewhere in this file.
    run_unittest(test_suite())
| apache-2.0 |
hezuoguang/ZGVL | WLServer/site-packages/django/contrib/sessions/backends/base.py | 104 | 10902 | from __future__ import unicode_literals
import base64
from datetime import datetime, timedelta
import logging
import string
from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from django.utils.crypto import constant_time_compare
from django.utils.crypto import get_random_string
from django.utils.crypto import salted_hmac
from django.utils import timezone
from django.utils.encoding import force_bytes, force_text
from django.utils.module_loading import import_by_path
from django.contrib.sessions.exceptions import SuspiciousSession
# session_key should not be case sensitive because some backends can store it
# on case insensitive file systems.
VALID_KEY_CHARS = string.ascii_lowercase + string.digits
class CreateError(Exception):
    """
    Used internally as a consistent exception type to catch from save (see the
    docstring for SessionBase.save() for details).
    """
    # The docstring already forms the class body; the original trailing
    # ``pass`` statement was redundant and has been removed.
class SessionBase(object):
    """
    Base class for all Session classes.
    """
    # Name/value of the cookie used by set_test_cookie()/test_cookie_worked().
    TEST_COOKIE_NAME = 'testcookie'
    TEST_COOKIE_VALUE = 'worked'

    def __init__(self, session_key=None):
        # ``accessed``/``modified`` let middleware decide whether to send
        # Vary/Set-Cookie headers; the serializer is configurable via settings.
        self._session_key = session_key
        self.accessed = False
        self.modified = False
        self.serializer = import_by_path(settings.SESSION_SERIALIZER)

    def __contains__(self, key):
        return key in self._session

    def __getitem__(self, key):
        return self._session[key]

    def __setitem__(self, key, value):
        # Any write marks the session dirty so it gets persisted.
        self._session[key] = value
        self.modified = True

    def __delitem__(self, key):
        del self._session[key]
        self.modified = True

    def get(self, key, default=None):
        return self._session.get(key, default)

    def pop(self, key, *args):
        # Only mark modified if the key actually existed (or was already dirty).
        self.modified = self.modified or key in self._session
        return self._session.pop(key, *args)

    def setdefault(self, key, value):
        if key in self._session:
            return self._session[key]
        else:
            self.modified = True
            self._session[key] = value
            return value

    def set_test_cookie(self):
        self[self.TEST_COOKIE_NAME] = self.TEST_COOKIE_VALUE

    def test_cookie_worked(self):
        return self.get(self.TEST_COOKIE_NAME) == self.TEST_COOKIE_VALUE

    def delete_test_cookie(self):
        del self[self.TEST_COOKIE_NAME]

    def _hash(self, value):
        # HMAC over the serialized payload, salted per concrete backend class,
        # so tampered session data is detected in decode().
        key_salt = "django.contrib.sessions" + self.__class__.__name__
        return salted_hmac(key_salt, value).hexdigest()

    def encode(self, session_dict):
        "Returns the given session dictionary serialized and encoded as a string."
        serialized = self.serializer().dumps(session_dict)
        hash = self._hash(serialized)
        return base64.b64encode(hash.encode() + b":" + serialized).decode('ascii')

    def decode(self, session_data):
        # Inverse of encode(): verify the HMAC, then deserialize the payload.
        encoded_data = base64.b64decode(force_bytes(session_data))
        try:
            # could produce ValueError if there is no ':'
            hash, serialized = encoded_data.split(b':', 1)
            expected_hash = self._hash(serialized)
            if not constant_time_compare(hash.decode(), expected_hash):
                raise SuspiciousSession("Session data corrupted")
            else:
                return self.serializer().loads(serialized)
        except Exception as e:
            # ValueError, SuspiciousOperation, unpickling exceptions. If any of
            # these happen, just return an empty dictionary (an empty session).
            if isinstance(e, SuspiciousOperation):
                logger = logging.getLogger('django.security.%s' %
                                           e.__class__.__name__)
                logger.warning(force_text(e))
            return {}

    def update(self, dict_):
        self._session.update(dict_)
        self.modified = True

    def has_key(self, key):
        # Python 2 dict-style API kept for backwards compatibility.
        return key in self._session

    def keys(self):
        return self._session.keys()

    def values(self):
        return self._session.values()

    def items(self):
        return self._session.items()

    # iterkeys/itervalues/iteritems mirror the Python 2 dict API and only
    # work when the underlying session dict provides them.
    def iterkeys(self):
        return self._session.iterkeys()

    def itervalues(self):
        return self._session.itervalues()

    def iteritems(self):
        return self._session.iteritems()

    def clear(self):
        # To avoid unnecessary persistent storage accesses, we set up the
        # internals directly (loading data wastes time, since we are going to
        # set it to an empty dict anyway).
        self._session_cache = {}
        self.accessed = True
        self.modified = True

    def _get_new_session_key(self):
        "Returns session key that isn't being used."
        while True:
            session_key = get_random_string(32, VALID_KEY_CHARS)
            if not self.exists(session_key):
                break
        return session_key

    def _get_or_create_session_key(self):
        if self._session_key is None:
            self._session_key = self._get_new_session_key()
        return self._session_key

    def _get_session_key(self):
        return self._session_key

    # Read-only from outside; the key is only (re)assigned internally.
    session_key = property(_get_session_key)

    def _get_session(self, no_load=False):
        """
        Lazily loads session from storage (unless "no_load" is True, when only
        an empty dict is stored) and stores it in the current instance.
        """
        self.accessed = True
        try:
            return self._session_cache
        except AttributeError:
            if self.session_key is None or no_load:
                self._session_cache = {}
            else:
                self._session_cache = self.load()
        return self._session_cache

    _session = property(_get_session)

    def get_expiry_age(self, **kwargs):
        """Get the number of seconds until the session expires.

        Optionally, this function accepts `modification` and `expiry` keyword
        arguments specifying the modification and expiry of the session.
        """
        try:
            modification = kwargs['modification']
        except KeyError:
            modification = timezone.now()
        # Make the difference between "expiry=None passed in kwargs" and
        # "expiry not passed in kwargs", in order to guarantee not to trigger
        # self.load() when expiry is provided.
        try:
            expiry = kwargs['expiry']
        except KeyError:
            expiry = self.get('_session_expiry')
        if not expiry:   # Checks both None and 0 cases
            return settings.SESSION_COOKIE_AGE
        if not isinstance(expiry, datetime):
            # An integer expiry is already an age in seconds.
            return expiry
        delta = expiry - modification
        return delta.days * 86400 + delta.seconds

    def get_expiry_date(self, **kwargs):
        """Get session the expiry date (as a datetime object).

        Optionally, this function accepts `modification` and `expiry` keyword
        arguments specifying the modification and expiry of the session.
        """
        try:
            modification = kwargs['modification']
        except KeyError:
            modification = timezone.now()
        # Same comment as in get_expiry_age
        try:
            expiry = kwargs['expiry']
        except KeyError:
            expiry = self.get('_session_expiry')
        if isinstance(expiry, datetime):
            return expiry
        if not expiry:   # Checks both None and 0 cases
            expiry = settings.SESSION_COOKIE_AGE
        return modification + timedelta(seconds=expiry)

    def set_expiry(self, value):
        """
        Sets a custom expiration for the session. ``value`` can be an integer,
        a Python ``datetime`` or ``timedelta`` object or ``None``.

        If ``value`` is an integer, the session will expire after that many
        seconds of inactivity. If set to ``0`` then the session will expire on
        browser close.

        If ``value`` is a ``datetime`` or ``timedelta`` object, the session
        will expire at that specific future time.

        If ``value`` is ``None``, the session uses the global session expiry
        policy.
        """
        if value is None:
            # Remove any custom expiration for this session.
            try:
                del self['_session_expiry']
            except KeyError:
                pass
            return
        if isinstance(value, timedelta):
            value = timezone.now() + value
        self['_session_expiry'] = value

    def get_expire_at_browser_close(self):
        """
        Returns ``True`` if the session is set to expire when the browser
        closes, and ``False`` if there's an expiry date. Use
        ``get_expiry_date()`` or ``get_expiry_age()`` to find the actual expiry
        date/age, if there is one.
        """
        if self.get('_session_expiry') is None:
            return settings.SESSION_EXPIRE_AT_BROWSER_CLOSE
        return self.get('_session_expiry') == 0

    def flush(self):
        """
        Removes the current session data from the database and regenerates the
        key.
        """
        self.clear()
        self.delete()
        self.create()

    def cycle_key(self):
        """
        Creates a new session key, whilst retaining the current session data.
        """
        data = self._session_cache
        key = self.session_key
        self.create()
        self._session_cache = data
        self.delete(key)

    # Methods that child classes must implement.

    def exists(self, session_key):
        """
        Returns True if the given session_key already exists.
        """
        raise NotImplementedError

    def create(self):
        """
        Creates a new session instance. Guaranteed to create a new object with
        a unique key and will have saved the result once (with empty data)
        before the method returns.
        """
        raise NotImplementedError

    def save(self, must_create=False):
        """
        Saves the session data. If 'must_create' is True, a new session object
        is created (otherwise a CreateError exception is raised). Otherwise,
        save() can update an existing object with the same key.
        """
        raise NotImplementedError

    def delete(self, session_key=None):
        """
        Deletes the session data under this key. If the key is None, the
        current session key value is used.
        """
        raise NotImplementedError

    def load(self):
        """
        Loads the session data and returns a dictionary.
        """
        raise NotImplementedError

    @classmethod
    def clear_expired(cls):
        """
        Remove expired sessions from the session store.

        If this operation isn't possible on a given backend, it should raise
        NotImplementedError. If it isn't necessary, because the backend has
        a built-in expiration mechanism, it should be a no-op.
        """
        raise NotImplementedError
| apache-2.0 |
hadesbox/luigi | test/hadoop_test.py | 2 | 11156 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import unittest
import luigi
import luigi.format
import luigi.hadoop
import luigi.hdfs
import luigi.mrrunner
import luigi.notifications
import minicluster
import mock
from luigi.mock import MockTarget
from nose.plugins.attrib import attr
luigi.notifications.DEBUG = True
luigi.hadoop.attach(minicluster)
class OutputMixin(luigi.Task):
    """Mixin that lets a task write either to HDFS or to an in-memory mock."""

    use_hdfs = luigi.BoolParameter(default=False)

    def get_output(self, fn):
        """Return the output target named *fn* for the selected backend."""
        if not self.use_hdfs:
            return MockTarget(fn)
        return luigi.hdfs.HdfsTarget(
            '/tmp/' + fn,
            format=luigi.format.get_default_format() >> luigi.hdfs.PlainDir)
class HadoopJobTask(luigi.hadoop.JobTask, OutputMixin):
    """Hadoop job task whose runner matches the chosen storage backend."""

    def job_runner(self):
        """Use the minicluster runner against HDFS, a local runner otherwise."""
        if not self.use_hdfs:
            return luigi.hadoop.LocalJobRunner()
        return minicluster.MiniClusterHadoopJobRunner()
class Words(OutputMixin):
    """Produces the small two-line corpus used as mapreduce input."""

    def output(self):
        return self.get_output('words')

    def run(self):
        # Use the target handle as a context manager so it is closed even if
        # a write fails (the original opened/closed it manually).
        # NOTE(review): assumes the target's file object supports the
        # context-manager protocol — confirm for MockTarget/HdfsTarget.
        with self.output().open('w') as f:
            f.write('kj kj lkj lkj ljoi j iljlk jlk jlk jk jkl jlk jlkj j ioj ioj kuh kjh\n')
            f.write('kjsfsdfkj sdjkf kljslkj flskjdfj jkkd jjfk jk jk jk jk jk jklkjf kj lkj lkj\n')
class WordCountJob(HadoopJobTask):
    """Classic word count over the Words corpus."""

    def mapper(self, line):
        # Emit (word, 1) per token and bump a per-word Hadoop counter.
        for word in line.strip().split():
            self.incr_counter('word', word, 1)
            yield word, 1

    def reducer(self, word, occurences):
        # Sum per-word ones into a total count.
        yield word, sum(occurences)

    def requires(self):
        return Words(self.use_hdfs)

    def output(self):
        return self.get_output('wordcount')
class WordFreqJob(HadoopJobTask):
    """Computes relative word frequencies, normalizing by WordCountJob's totals."""

    def init_local(self):
        # Sum all counts from the word-count output so the mapper can
        # normalize each occurrence to a frequency.
        self.n = 0
        for line in self.input_local().open('r'):
            word, count = line.strip().split()
            self.n += int(count)

    def mapper(self, line):
        for word in line.strip().split():
            yield word, 1.0 / self.n

    def combiner(self, word, occurrences):
        yield word, sum(occurrences)

    def reducer(self, word, occurences):
        yield word, sum(occurences)

    def requires_local(self):
        # Dependency consumed on the submitting host (init_local).
        return WordCountJob(self.use_hdfs)

    def requires_hadoop(self):
        # Dependency consumed by the hadoop job itself.
        return Words(self.use_hdfs)

    def output(self):
        return self.get_output('luigitest-2')

    def extra_files(self):
        # Ship an arbitrary file to exercise the extra-files plumbing.
        fn = os.listdir('.')[0]  # Just return some file, doesn't matter which
        return [(fn, 'my_dir/my_file')]

    def init_remote(self):
        f = open('my_dir/my_file')  # make sure it exists
class MapOnlyJob(HadoopJobTask):
    """Map-only job: emits each word as a one-element tuple (no reducer)."""

    def mapper(self, line):
        for token in line.strip().split():
            yield (token,)

    def requires_hadoop(self):
        return Words(self.use_hdfs)

    def output(self):
        return self.get_output('luigitest-3')
class UnicodeJob(HadoopJobTask):
    """Emits the same key as both unicode and bytes to test key grouping."""

    def mapper(self, line):
        # Deliberately yield a unicode key AND a bytes key: the framework must
        # not merge them during grouping (this mirrors cluster behaviour).
        yield u'test', 1
        yield b'test', 1

    def reducer(self, word, occurences):
        yield word, sum(occurences)

    def requires(self):
        return Words(self.use_hdfs)

    def output(self):
        return self.get_output('luigitest-4')
class FailingJobException(Exception):
    """Marker exception raised by FailingJob to exercise failure handling."""
class FailingJob(HadoopJobTask):
    """Job whose hadoop-side initialization always fails; used to test error paths."""

    def init_hadoop(self):
        raise FailingJobException('failure')

    def output(self):
        return self.get_output('failing')
class MyStreamingJob(luigi.hadoop.JobTask):
    # Regression guard: instantiating a JobTask with a parameter
    # (see test_instantiate_job and https://github.com/spotify/luigi/issues/738).
    param = luigi.Parameter()
def read_wordcount_output(p):
    """Parse a two-column 'word count' target into a dict (values kept as strings)."""
    return dict(line.strip().split() for line in p.open('r'))
class CommonTests(object):
    """Shared test bodies, parameterized on the calling test case's use_hdfs flag."""

    @staticmethod
    def test_run(test_case):
        """Word count produces the expected count for 'jk'."""
        job = WordCountJob(use_hdfs=test_case.use_hdfs)
        luigi.build([job], local_scheduler=True)
        c = read_wordcount_output(job.output())
        test_case.assertEqual(int(c['jk']), 6)

    @staticmethod
    def test_run_2(test_case):
        """Word frequency normalizes counts by the corpus total (33 words)."""
        job = WordFreqJob(use_hdfs=test_case.use_hdfs)
        luigi.build([job], local_scheduler=True)
        c = read_wordcount_output(job.output())
        test_case.assertAlmostEquals(float(c['jk']), 6.0 / 33.0)

    @staticmethod
    def test_map_only(test_case):
        """Map-only output preserves per-token lines in input order."""
        job = MapOnlyJob(use_hdfs=test_case.use_hdfs)
        luigi.build([job], local_scheduler=True)
        c = []
        for line in job.output().open('r'):
            c.append(line.strip())
        test_case.assertEqual(c[0], 'kj')
        test_case.assertEqual(c[4], 'ljoi')

    @staticmethod
    def test_unicode_job(test_case):
        """unicode('test') and str('test') keys must not be grouped together."""
        job = UnicodeJob(use_hdfs=test_case.use_hdfs)
        luigi.build([job], local_scheduler=True)
        c = []
        for line in job.output().open('r'):
            c.append(line)
        # Make sure unicode('test') isnt grouped with str('test')
        # Since this is what happens when running on cluster
        test_case.assertEqual(len(c), 2)
        test_case.assertEqual(c[0], "test\t2\n")
        # BUG FIX: the original asserted c[0] twice; the second assertion
        # should check the second output line.
        test_case.assertEqual(c[1], "test\t2\n")

    @staticmethod
    def test_failing_job(test_case):
        """A job failing in init_hadoop makes the build report failure."""
        job = FailingJob(use_hdfs=test_case.use_hdfs)
        success = luigi.build([job], local_scheduler=True)
        test_case.assertFalse(success)
class MapreduceLocalTest(unittest.TestCase):
    """Runs the shared CommonTests bodies with the local (non-HDFS) runner."""
    use_hdfs = False

    def test_run(self):
        CommonTests.test_run(self)

    def test_run_2(self):
        CommonTests.test_run_2(self)

    def test_map_only(self):
        CommonTests.test_map_only(self)

    def test_unicode_job(self):
        CommonTests.test_unicode_job(self)

    def test_failing_job(self):
        CommonTests.test_failing_job(self)

    def test_instantiate_job(self):
        # See https://github.com/spotify/luigi/issues/738
        MyStreamingJob('param_value')

    def setUp(self):
        # Start each test with an empty in-memory mock filesystem.
        MockTarget.fs.clear()
@attr('minicluster')
class MapreduceIntegrationTest(minicluster.MiniClusterTestCase):
    """ Uses the Minicluster functionality to test this against Hadoop """
    use_hdfs = True

    def test_run(self):
        CommonTests.test_run(self)

    def test_run_2(self):
        CommonTests.test_run_2(self)

    def test_map_only(self):
        CommonTests.test_map_only(self)

    # TODO(erikbern): some really annoying issue with minicluster causes
    # test_unicode_job to hang

    def test_failing_job(self):
        CommonTests.test_failing_job(self)
class CreatePackagesArchive(unittest.TestCase):
    """Verifies which files luigi.hadoop.create_packages_archive adds to the tarball."""

    def setUp(self):
        # Make the fixture modules/packages importable.
        sys.path.append(os.path.join('test', 'create_packages_archive_root'))

    def tearDown(self):
        sys.path.remove(os.path.join('test', 'create_packages_archive_root'))

    def _assert_module(self, add):
        # A plain module is archived as a single .py file.
        add.assert_called_once_with('test/create_packages_archive_root/module.py',
                                    'module.py')

    def _assert_package(self, add):
        # A package pulls in all submodules, subpackages and its egg-info.
        add.assert_any_call('test/create_packages_archive_root/package/__init__.py', 'package/__init__.py')
        add.assert_any_call('test/create_packages_archive_root/package/submodule.py', 'package/submodule.py')
        add.assert_any_call('test/create_packages_archive_root/package/submodule_with_absolute_import.py', 'package/submodule_with_absolute_import.py')
        add.assert_any_call('test/create_packages_archive_root/package/submodule_without_imports.py', 'package/submodule_without_imports.py')
        add.assert_any_call('test/create_packages_archive_root/package/subpackage/__init__.py', 'package/subpackage/__init__.py')
        add.assert_any_call('test/create_packages_archive_root/package/subpackage/submodule.py', 'package/subpackage/submodule.py')
        add.assert_any_call('test/create_packages_archive_root/package.egg-info/top_level.txt', 'package.egg-info/top_level.txt')
        assert add.call_count == 7

    def _assert_package_subpackage(self, add):
        add.assert_any_call('test/create_packages_archive_root/package/__init__.py', 'package/__init__.py')
        add.assert_any_call('test/create_packages_archive_root/package/subpackage/__init__.py', 'package/subpackage/__init__.py')
        add.assert_any_call('test/create_packages_archive_root/package/subpackage/submodule.py', 'package/subpackage/submodule.py')
        assert add.call_count == 3

    # Each test patches tarfile.open so no archive is actually written; the
    # mock records what would have been added.

    @mock.patch('tarfile.open')
    def test_create_packages_archive_module(self, tar):
        module = __import__("module", None, None, 'dummy')
        luigi.hadoop.create_packages_archive([module], '/dev/null')
        self._assert_module(tar.return_value.add)

    @mock.patch('tarfile.open')
    def test_create_packages_archive_package(self, tar):
        package = __import__("package", None, None, 'dummy')
        luigi.hadoop.create_packages_archive([package], '/dev/null')
        self._assert_package(tar.return_value.add)

    @mock.patch('tarfile.open')
    def test_create_packages_archive_package_submodule(self, tar):
        package_submodule = __import__("package.submodule", None, None, 'dummy')
        luigi.hadoop.create_packages_archive([package_submodule], '/dev/null')
        self._assert_package(tar.return_value.add)

    @mock.patch('tarfile.open')
    def test_create_packages_archive_package_submodule_with_absolute_import(self, tar):
        package_submodule_with_absolute_import = __import__("package.submodule_with_absolute_import", None, None, 'dummy')
        luigi.hadoop.create_packages_archive([package_submodule_with_absolute_import], '/dev/null')
        self._assert_package(tar.return_value.add)

    @mock.patch('tarfile.open')
    def test_create_packages_archive_package_submodule_without_imports(self, tar):
        package_submodule_without_imports = __import__("package.submodule_without_imports", None, None, 'dummy')
        luigi.hadoop.create_packages_archive([package_submodule_without_imports], '/dev/null')
        self._assert_package(tar.return_value.add)

    @mock.patch('tarfile.open')
    def test_create_packages_archive_package_subpackage(self, tar):
        package_subpackage = __import__("package.subpackage", None, None, 'dummy')
        luigi.hadoop.create_packages_archive([package_subpackage], '/dev/null')
        self._assert_package_subpackage(tar.return_value.add)

    @mock.patch('tarfile.open')
    def test_create_packages_archive_package_subpackage_submodule(self, tar):
        package_subpackage_submodule = __import__("package.subpackage.submodule", None, None, 'dummy')
        luigi.hadoop.create_packages_archive([package_subpackage_submodule], '/dev/null')
        self._assert_package_subpackage(tar.return_value.add)
if __name__ == '__main__':
    # BUG FIX: the original called HadoopJobTest.test_run_real(), but no class
    # of that name exists in this module (NameError at runtime). Run the whole
    # test module via unittest instead.
    unittest.main()
lihui7115/ChromiumGStreamerBackend | third_party/protobuf/python/google/protobuf/internal/message_test.py | 224 | 22295 | #! /usr/bin/python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests python protocol buffers against the golden message.
Note that the golden messages exercise every known field type, thus this
test ends up exercising and verifying nearly all of the parsing and
serialization code in the whole library.
TODO(kenton): Merge with wire_format_test? It doesn't make a whole lot of
sense to call this a test of the "message" module, which only declares an
abstract interface.
"""
__author__ = 'gps@google.com (Gregory P. Smith)'
import copy
import math
import operator
import pickle
import unittest
from google.protobuf import unittest_import_pb2
from google.protobuf import unittest_pb2
from google.protobuf.internal import api_implementation
from google.protobuf.internal import test_util
from google.protobuf import message
# Python pre-2.6 does not have isinf() or isnan() functions, so we have
# to provide our own.
def isnan(val):
    """Return True if *val* is NaN; NaN is the only value unequal to itself."""
    return not (val == val)
def isinf(val):
    """Return True for positive or negative infinity (False for NaN and finites)."""
    # Equivalent to the "inf * 0 is NaN" trick, written as direct comparisons.
    return val == float('inf') or val == float('-inf')
def IsPosInf(val):
    """Return True only for positive infinity."""
    return val == float('inf')
def IsNegInf(val):
    """Return True only for negative infinity."""
    return val == float('-inf')
class MessageTest(unittest.TestCase):
def testGoldenMessage(self):
    """Golden file parses with all fields set and reserializes byte-identically."""
    golden_data = test_util.GoldenFile('golden_message').read()
    golden_message = unittest_pb2.TestAllTypes()
    golden_message.ParseFromString(golden_data)
    test_util.ExpectAllFieldsSet(self, golden_message)
    self.assertEqual(golden_data, golden_message.SerializeToString())
    # A deep copy must serialize to the same bytes as the original.
    golden_copy = copy.deepcopy(golden_message)
    self.assertEqual(golden_data, golden_copy.SerializeToString())

def testGoldenExtensions(self):
    """Golden file round-trips through the extensions message type."""
    golden_data = test_util.GoldenFile('golden_message').read()
    golden_message = unittest_pb2.TestAllExtensions()
    golden_message.ParseFromString(golden_data)
    all_set = unittest_pb2.TestAllExtensions()
    test_util.SetAllExtensions(all_set)
    self.assertEquals(all_set, golden_message)
    self.assertEqual(golden_data, golden_message.SerializeToString())
    golden_copy = copy.deepcopy(golden_message)
    self.assertEqual(golden_data, golden_copy.SerializeToString())

def testGoldenPackedMessage(self):
    """Golden packed-fields file round-trips through TestPackedTypes."""
    golden_data = test_util.GoldenFile('golden_packed_fields_message').read()
    golden_message = unittest_pb2.TestPackedTypes()
    golden_message.ParseFromString(golden_data)
    all_set = unittest_pb2.TestPackedTypes()
    test_util.SetAllPackedFields(all_set)
    self.assertEquals(all_set, golden_message)
    self.assertEqual(golden_data, all_set.SerializeToString())
    golden_copy = copy.deepcopy(golden_message)
    self.assertEqual(golden_data, golden_copy.SerializeToString())

def testGoldenPackedExtensions(self):
    """Golden packed-fields file round-trips through TestPackedExtensions."""
    golden_data = test_util.GoldenFile('golden_packed_fields_message').read()
    golden_message = unittest_pb2.TestPackedExtensions()
    golden_message.ParseFromString(golden_data)
    all_set = unittest_pb2.TestPackedExtensions()
    test_util.SetAllPackedExtensions(all_set)
    self.assertEquals(all_set, golden_message)
    self.assertEqual(golden_data, all_set.SerializeToString())
    golden_copy = copy.deepcopy(golden_message)
    self.assertEqual(golden_data, golden_copy.SerializeToString())
def testPickleSupport(self):
    """A fully populated message survives a pickle round-trip unchanged."""
    golden_data = test_util.GoldenFile('golden_message').read()
    golden_message = unittest_pb2.TestAllTypes()
    golden_message.ParseFromString(golden_data)
    pickled_message = pickle.dumps(golden_message)
    unpickled_message = pickle.loads(pickled_message)
    self.assertEquals(unpickled_message, golden_message)

def testPickleIncompleteProto(self):
    """Pickling works even when required fields are unset (serializing does not)."""
    golden_message = unittest_pb2.TestRequired(a=1)
    pickled_message = pickle.dumps(golden_message)
    unpickled_message = pickle.loads(pickled_message)
    self.assertEquals(unpickled_message, golden_message)
    self.assertEquals(unpickled_message.a, 1)
    # This is still an incomplete proto - so serializing should fail
    self.assertRaises(message.EncodeError, unpickled_message.SerializeToString)
def testPositiveInfinity(self):
    """+inf floats/doubles parse from wire bytes and reserialize identically."""
    golden_data = ('\x5D\x00\x00\x80\x7F'
                   '\x61\x00\x00\x00\x00\x00\x00\xF0\x7F'
                   '\xCD\x02\x00\x00\x80\x7F'
                   '\xD1\x02\x00\x00\x00\x00\x00\x00\xF0\x7F')
    golden_message = unittest_pb2.TestAllTypes()
    golden_message.ParseFromString(golden_data)
    self.assertTrue(IsPosInf(golden_message.optional_float))
    self.assertTrue(IsPosInf(golden_message.optional_double))
    self.assertTrue(IsPosInf(golden_message.repeated_float[0]))
    self.assertTrue(IsPosInf(golden_message.repeated_double[0]))
    self.assertEqual(golden_data, golden_message.SerializeToString())

def testNegativeInfinity(self):
    """-inf floats/doubles parse from wire bytes and reserialize identically."""
    golden_data = ('\x5D\x00\x00\x80\xFF'
                   '\x61\x00\x00\x00\x00\x00\x00\xF0\xFF'
                   '\xCD\x02\x00\x00\x80\xFF'
                   '\xD1\x02\x00\x00\x00\x00\x00\x00\xF0\xFF')
    golden_message = unittest_pb2.TestAllTypes()
    golden_message.ParseFromString(golden_data)
    self.assertTrue(IsNegInf(golden_message.optional_float))
    self.assertTrue(IsNegInf(golden_message.optional_double))
    self.assertTrue(IsNegInf(golden_message.repeated_float[0]))
    self.assertTrue(IsNegInf(golden_message.repeated_double[0]))
    self.assertEqual(golden_data, golden_message.SerializeToString())

def testNotANumber(self):
    """NaN floats/doubles parse and re-parse as NaN (exact bytes may differ)."""
    golden_data = ('\x5D\x00\x00\xC0\x7F'
                   '\x61\x00\x00\x00\x00\x00\x00\xF8\x7F'
                   '\xCD\x02\x00\x00\xC0\x7F'
                   '\xD1\x02\x00\x00\x00\x00\x00\x00\xF8\x7F')
    golden_message = unittest_pb2.TestAllTypes()
    golden_message.ParseFromString(golden_data)
    self.assertTrue(isnan(golden_message.optional_float))
    self.assertTrue(isnan(golden_message.optional_double))
    self.assertTrue(isnan(golden_message.repeated_float[0]))
    self.assertTrue(isnan(golden_message.repeated_double[0]))
    # The protocol buffer may serialize to any one of multiple different
    # representations of a NaN.  Rather than verify a specific representation,
    # verify the serialized string can be converted into a correctly
    # behaving protocol buffer.
    serialized = golden_message.SerializeToString()
    # NOTE(review): this local ``message`` shadows the imported
    # google.protobuf.message module within this method.
    message = unittest_pb2.TestAllTypes()
    message.ParseFromString(serialized)
    self.assertTrue(isnan(message.optional_float))
    self.assertTrue(isnan(message.optional_double))
    self.assertTrue(isnan(message.repeated_float[0]))
    self.assertTrue(isnan(message.repeated_double[0]))
def testPositiveInfinityPacked(self):
    """+inf packed floats/doubles parse and reserialize identically."""
    golden_data = ('\xA2\x06\x04\x00\x00\x80\x7F'
                   '\xAA\x06\x08\x00\x00\x00\x00\x00\x00\xF0\x7F')
    golden_message = unittest_pb2.TestPackedTypes()
    golden_message.ParseFromString(golden_data)
    self.assertTrue(IsPosInf(golden_message.packed_float[0]))
    self.assertTrue(IsPosInf(golden_message.packed_double[0]))
    self.assertEqual(golden_data, golden_message.SerializeToString())

def testNegativeInfinityPacked(self):
    """-inf packed floats/doubles parse and reserialize identically."""
    golden_data = ('\xA2\x06\x04\x00\x00\x80\xFF'
                   '\xAA\x06\x08\x00\x00\x00\x00\x00\x00\xF0\xFF')
    golden_message = unittest_pb2.TestPackedTypes()
    golden_message.ParseFromString(golden_data)
    self.assertTrue(IsNegInf(golden_message.packed_float[0]))
    self.assertTrue(IsNegInf(golden_message.packed_double[0]))
    self.assertEqual(golden_data, golden_message.SerializeToString())

def testNotANumberPacked(self):
    """NaN packed floats/doubles survive a serialize/parse round-trip as NaN."""
    golden_data = ('\xA2\x06\x04\x00\x00\xC0\x7F'
                   '\xAA\x06\x08\x00\x00\x00\x00\x00\x00\xF8\x7F')
    golden_message = unittest_pb2.TestPackedTypes()
    golden_message.ParseFromString(golden_data)
    self.assertTrue(isnan(golden_message.packed_float[0]))
    self.assertTrue(isnan(golden_message.packed_double[0]))
    serialized = golden_message.SerializeToString()
    # NOTE(review): this local ``message`` shadows the imported
    # google.protobuf.message module within this method.
    message = unittest_pb2.TestPackedTypes()
    message.ParseFromString(serialized)
    self.assertTrue(isnan(message.packed_float[0]))
    self.assertTrue(isnan(message.packed_double[0]))
def testExtremeFloatValues(self):
    """Round-trip serialization preserves extreme float32 magnitudes.

    The original repeated the same three lines eight times and used
    ``assertTrue(a == b)``, which reports nothing useful on failure;
    this version loops and uses ``assertEqual``.
    """
    message = unittest_pb2.TestAllTypes()

    def check_roundtrip(value):
        # Serialize, reparse, and verify the value survived exactly.
        message.optional_float = value
        message.ParseFromString(message.SerializeToString())
        self.assertEqual(message.optional_float, value)

    # Most positive/negative exponent, with zero or one significand bit set.
    kMostPosExponentNoSigBits = math.pow(2, 127)
    kMostPosExponentOneSigBit = 1.5 * math.pow(2, 127)
    kMostNegExponentNoSigBits = math.pow(2, -127)
    kMostNegExponentOneSigBit = 1.5 * math.pow(2, -127)
    for value in (kMostPosExponentNoSigBits, kMostPosExponentOneSigBit,
                  kMostNegExponentNoSigBits, kMostNegExponentOneSigBit):
        # Each magnitude is checked with both signs.
        check_roundtrip(value)
        check_roundtrip(-value)
def testExtremeDoubleValues(self):
    """Round-trip serialization preserves extreme float64 magnitudes.

    Same restructuring as testExtremeFloatValues: loop instead of eight
    copy-pasted stanzas, and ``assertEqual`` instead of ``assertTrue(a == b)``
    for informative failure messages.
    """
    message = unittest_pb2.TestAllTypes()

    def check_roundtrip(value):
        # Serialize, reparse, and verify the value survived exactly.
        message.optional_double = value
        message.ParseFromString(message.SerializeToString())
        self.assertEqual(message.optional_double, value)

    # Most positive/negative exponent, with zero or one significand bit set.
    kMostPosExponentNoSigBits = math.pow(2, 1023)
    kMostPosExponentOneSigBit = 1.5 * math.pow(2, 1023)
    kMostNegExponentNoSigBits = math.pow(2, -1023)
    kMostNegExponentOneSigBit = 1.5 * math.pow(2, -1023)
    for value in (kMostPosExponentNoSigBits, kMostPosExponentOneSigBit,
                  kMostNegExponentNoSigBits, kMostNegExponentOneSigBit):
        # Each magnitude is checked with both signs.
        check_roundtrip(value)
        check_roundtrip(-value)
def testSortingRepeatedScalarFieldsDefaultComparator(self):
    """Check some different scalar types with the default comparator."""
    message = unittest_pb2.TestAllTypes()
    # TODO(mattp): would testing more scalar types strengthen test?
    message.repeated_int32.extend([1, 3, 2])
    message.repeated_int32.sort()
    for index, expected in enumerate([1, 2, 3]):
        self.assertEqual(message.repeated_int32[index], expected)

    message.repeated_float.extend([1.1, 1.3, 1.2])
    message.repeated_float.sort()
    # Floats go through a 32-bit representation, so compare approximately.
    for index, expected in enumerate([1.1, 1.2, 1.3]):
        self.assertAlmostEqual(message.repeated_float[index], expected)

    message.repeated_string.extend(['a', 'c', 'b'])
    message.repeated_string.sort()
    for index, expected in enumerate(['a', 'b', 'c']):
        self.assertEqual(message.repeated_string[index], expected)

    message.repeated_bytes.extend(['a', 'c', 'b'])
    message.repeated_bytes.sort()
    for index, expected in enumerate(['a', 'b', 'c']):
        self.assertEqual(message.repeated_bytes[index], expected)
def testSortingRepeatedScalarFieldsCustomComparator(self):
    """Check some different scalar types with a custom comparator."""
    message = unittest_pb2.TestAllTypes()

    # Sort ints by absolute value: -1, -2, -3 is ascending by magnitude.
    by_magnitude = lambda x, y: cmp(abs(x), abs(y))
    message.repeated_int32.extend([-3, -2, -1])
    message.repeated_int32.sort(by_magnitude)
    self.assertEqual(list(message.repeated_int32), [-1, -2, -3])

    # Sort strings by length rather than lexicographically.
    by_length = lambda x, y: cmp(len(x), len(y))
    message.repeated_string.extend(['aaa', 'bb', 'c'])
    message.repeated_string.sort(by_length)
    self.assertEqual(list(message.repeated_string), ['c', 'bb', 'aaa'])
def testSortingRepeatedCompositeFieldsCustomComparator(self):
    """Check passing a custom comparator to sort a repeated composite field."""
    message = unittest_pb2.TestAllTypes()
    # Populate the repeated message field out of order.
    for bb in (1, 3, 2, 6, 5, 4):
        message.repeated_nested_message.add().bb = bb
    # Sorting with a comparator on the nested 'bb' field restores order.
    message.repeated_nested_message.sort(lambda x, y: cmp(x.bb, y.bb))
    self.assertEqual([m.bb for m in message.repeated_nested_message],
                     [1, 2, 3, 4, 5, 6])
def testRepeatedCompositeFieldSortArguments(self):
    """Check sorting a repeated composite field using list.sort() arguments."""
    message = unittest_pb2.TestAllTypes()
    get_bb = operator.attrgetter('bb')
    cmp_bb = lambda a, b: cmp(a.bb, b.bb)
    # Populate the repeated message field out of order.
    for bb in (1, 3, 2, 6, 5, 4):
        message.repeated_nested_message.add().bb = bb

    ascending = [1, 2, 3, 4, 5, 6]
    descending = ascending[::-1]

    # Exercise every accepted sort() calling convention: key=, key+reverse,
    # the legacy 'sort_function' alias, and cmp+reverse.
    message.repeated_nested_message.sort(key=get_bb)
    self.assertEqual([k.bb for k in message.repeated_nested_message], ascending)
    message.repeated_nested_message.sort(key=get_bb, reverse=True)
    self.assertEqual([k.bb for k in message.repeated_nested_message], descending)
    message.repeated_nested_message.sort(sort_function=cmp_bb)
    self.assertEqual([k.bb for k in message.repeated_nested_message], ascending)
    message.repeated_nested_message.sort(cmp=cmp_bb, reverse=True)
    self.assertEqual([k.bb for k in message.repeated_nested_message], descending)
def testRepeatedScalarFieldSortArguments(self):
    """Check sorting a scalar field using list.sort() arguments."""
    message = unittest_pb2.TestAllTypes()

    # Integers sorted by magnitude via key=, the legacy 'sort_function'
    # alias, and cmp=, each with and without reverse=.
    magnitude_cmp = lambda a, b: cmp(abs(a), abs(b))
    message.repeated_int32.extend([-3, -2, -1])
    message.repeated_int32.sort(key=abs)
    self.assertEqual(list(message.repeated_int32), [-1, -2, -3])
    message.repeated_int32.sort(key=abs, reverse=True)
    self.assertEqual(list(message.repeated_int32), [-3, -2, -1])
    message.repeated_int32.sort(sort_function=magnitude_cmp)
    self.assertEqual(list(message.repeated_int32), [-1, -2, -3])
    message.repeated_int32.sort(cmp=magnitude_cmp, reverse=True)
    self.assertEqual(list(message.repeated_int32), [-3, -2, -1])

    # Strings sorted by length through the same set of argument styles.
    length_cmp = lambda a, b: cmp(len(a), len(b))
    message.repeated_string.extend(['aaa', 'bb', 'c'])
    message.repeated_string.sort(key=len)
    self.assertEqual(list(message.repeated_string), ['c', 'bb', 'aaa'])
    message.repeated_string.sort(key=len, reverse=True)
    self.assertEqual(list(message.repeated_string), ['aaa', 'bb', 'c'])
    message.repeated_string.sort(sort_function=length_cmp)
    self.assertEqual(list(message.repeated_string), ['c', 'bb', 'aaa'])
    message.repeated_string.sort(cmp=length_cmp, reverse=True)
    self.assertEqual(list(message.repeated_string), ['aaa', 'bb', 'c'])
def testParsingMerge(self):
    """Check the merge behavior when a required or optional field appears
    multiple times in the input."""
    messages = [unittest_pb2.TestAllTypes() for _ in range(3)]
    messages[0].optional_int32 = 1
    messages[1].optional_int64 = 2
    messages[2].optional_int32 = 3
    messages[2].optional_string = 'hello'

    # The result expected from merging the three messages above.
    merged_message = unittest_pb2.TestAllTypes()
    merged_message.optional_int32 = 3
    merged_message.optional_int64 = 2
    merged_message.optional_string = 'hello'

    # Emit each message three times into every singular field slot of
    # TestParsingMerge (plain fields, extensions, and groups).
    generator = unittest_pb2.TestParsingMerge.RepeatedFieldsGenerator()
    for repeated_field in (generator.field1, generator.field2,
                           generator.field3, generator.ext1, generator.ext2):
        repeated_field.extend(messages)
    for group in (generator.group1, generator.group2):
        for msg in messages:
            group.add().field1.MergeFrom(msg)
    data = generator.SerializeToString()

    parsing_merge = unittest_pb2.TestParsingMerge()
    parsing_merge.ParseFromString(data)

    # Required and optional fields should be merged.
    self.assertEqual(parsing_merge.required_all_types, merged_message)
    self.assertEqual(parsing_merge.optional_all_types, merged_message)
    self.assertEqual(parsing_merge.optionalgroup.optional_group_all_types,
                     merged_message)
    self.assertEqual(
        parsing_merge.Extensions[unittest_pb2.TestParsingMerge.optional_ext],
        merged_message)

    # Repeated fields should not be merged.
    self.assertEqual(len(parsing_merge.repeated_all_types), 3)
    self.assertEqual(len(parsing_merge.repeatedgroup), 3)
    self.assertEqual(
        len(parsing_merge.Extensions[
            unittest_pb2.TestParsingMerge.repeated_ext]), 3)
def testSortEmptyRepeatedCompositeContainer(self):
    """Regression test: sorting an empty repeated composite container
    has led to segfaults in the past; it must simply be a no-op."""
    message = unittest_pb2.TestAllTypes()
    message.repeated_nested_message.sort()
# Run the full test suite when this module is executed as a script.
if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
MicroTrustRepos/microkernel | src/l4/pkg/python/contrib/Lib/idlelib/macosxSupport.py | 2 | 4656 | """
A number of functions that enhance IDLE on MacOSX when it is used as a normal
GUI application (as opposed to an X11 application).
"""
import sys
import Tkinter
def runningAsOSXApp():
    """
    Return True if Python is running from within an app on OSX.
    If so, assume that Python was built with Aqua Tcl/Tk rather than
    X11 Tcl/Tk.
    """
    is_darwin = sys.platform == 'darwin'
    # An app-bundle interpreter lives under a '*.app' directory.
    return is_darwin and '.app' in sys.executable
def addOpenEventSupport(root, flist):
    """
    Ensure that the application responds to open AppleEvents, which
    makes it feasible to use IDLE as the default application for
    Python files.
    """
    def doOpenFile(*paths):
        for path in paths:
            flist.open(path)

    # AquaTk invokes this hook whenever the application receives a
    # file-open event; the callback receives one argument per file.
    root.createcommand("::tk::mac::OpenDocument", doOpenFile)
def hideTkConsole(root):
    """Ask Tk to hide its built-in console window.

    Silently ignored on Tk builds that have no console object.
    """
    try:
        root.tk.call('console', 'hide')
    except Tkinter.TclError:
        pass  # this version of the Tk framework has no console object
def overrideRootMenu(root, flist):
    """
    Replace the Tk root menu by something that's more appropriate for
    IDLE.
    """
    # The menu that is attached to the Tk root (".") is also used by AquaTk for
    # all windows that don't specify a menu of their own. The default menubar
    # contains a number of menus, none of which are appropriate for IDLE. The
    # most annoying of those is an 'About Tcl/Tk...' menu in the application
    # menu.
    #
    # This function replaces the default menubar by a mostly empty one; it
    # should only contain the correct application menu and the window menu.
    #
    # Due to a (mis-)feature of TkAqua the user will also see an empty Help
    # menu.
    #
    # Fixed: the original line read 'from Tkinter import Menu, Text, Text',
    # importing Text twice; the duplicate is dropped.
    # NOTE(review): Text and MultiCallCreator appear unused in this function —
    # confirm before removing the imports entirely.
    from Tkinter import Menu, Text
    from EditorWindow import prepstr, get_accelerator
    import Bindings
    import WindowList
    from MultiCall import MultiCallCreator

    menubar = Menu(root)
    root.configure(menu=menubar)
    menudict = {}

    # Window menu: repopulated from the open-windows list every time it is
    # posted.
    menudict['windows'] = menu = Menu(menubar, name='windows')
    menubar.add_cascade(label='Window', menu=menu, underline=0)

    def postwindowsmenu(menu=menu):
        # Clear stale entries before re-adding the currently open windows.
        end = menu.index('end')
        if end is None:
            end = -1
        if end > 0:
            menu.delete(0, end)
        WindowList.add_windows_to_menu(menu)
    WindowList.register_callback(postwindowsmenu)

    # Application menu: the special 'apple' menu in AquaTk.
    menudict['application'] = menu = Menu(menubar, name='apple')
    menubar.add_cascade(label='IDLE', menu=menu)

    def about_dialog(event=None):
        import aboutDialog
        aboutDialog.AboutDialog(root, 'About IDLE')

    def config_dialog(event=None):
        import configDialog
        root.instance_dict = flist.inversedict
        configDialog.ConfigDialog(root, 'Settings')

    root.bind('<<about-idle>>', about_dialog)
    root.bind('<<open-config-dialog>>', config_dialog)
    if flist:
        root.bind('<<close-all-windows>>', flist.close_all_callback)

    ###check if Tk version >= 8.4.14; if so, use hard-coded showprefs binding
    tkversion = root.tk.eval('info patchlevel')
    # Note: we cannot check if the string tkversion >= '8.4.14', because
    # the string '8.4.7' is greater than the string '8.4.14'.
    if tuple(map(int, tkversion.split('.'))) >= (8, 4, 14):
        # Modern AquaTk: install the About entry and hook the native
        # Preferences command directly.
        Bindings.menudefs[0] = ('application', [
            ('About IDLE', '<<about-idle>>'),
            None,
        ])
        root.createcommand('::tk::mac::ShowPreferences', config_dialog)
    else:
        # Older Tk: rebuild the menus from the standard binding definitions.
        for mname, entrylist in Bindings.menudefs:
            menu = menudict.get(mname)
            if not menu:
                continue
            else:
                for entry in entrylist:
                    if not entry:
                        menu.add_separator()
                    else:
                        label, eventname = entry
                        underline, label = prepstr(label)
                        accelerator = get_accelerator(Bindings.default_keydefs,
                                                      eventname)
                        def command(text=root, eventname=eventname):
                            text.event_generate(eventname)
                        menu.add_command(label=label, underline=underline,
                                         command=command,
                                         accelerator=accelerator)
def setupApp(root, flist):
    """
    Perform setup for the OSX application bundle.

    Does nothing when Python is not running from within an app bundle.
    """
    if runningAsOSXApp():
        hideTkConsole(root)
        overrideRootMenu(root, flist)
        addOpenEventSupport(root, flist)
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.