repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
darkwing/kuma | vendor/packages/mock/tests/testpatch.py | Python | mpl-2.0 | 55,260 | 0.002678 | # Copyright (C) 2007-2012 Michael Foord & the mock team
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# http://www.voidspace.org.uk/python/mock/
import os
import sys
import unittest2 as unittest
from mock.tests import support
from mock.tests.support import inPy3k, SomeClass, is_instance, callable
from mock import (
NonCallableMock, CallableMixin, patch, sentinel,
MagicMock, Mock, NonCallableMagicMock, patch,
DEFAULT, call
)
from mock.mock import _patch, _get_target
builtin_string = '__builtin__'
if inPy3k:
builtin_string = 'builtins'
unicode = str
PTModule = sys.modules[__name__]
MODNAME = '%s.PTModule' % __name__
def _get_proxy(obj, get_only=True):
class Proxy(object):
def __getattr__(self, name):
return getattr(obj, name)
if not get_only:
def __setattr__(self, name, value):
setattr(obj, name, value)
def __delattr__(self, name):
delattr(obj, name)
Proxy.__setattr__ = __setattr__
Proxy.__delattr__ = __delattr__
return Proxy()
# for use in the test
something = sentinel.Something
something_else = sentinel.SomethingElse
class Foo(object):
def __init__(self, a):
pass
def f(self, a):
pass
def g(self):
pass
foo = 'bar'
class Bar(object):
def a(self):
pass
foo_name = '%s.Foo' % __name__
def function(a, b=Foo):
pass
class Container(object):
def __init__(self):
self.values = {}
def __getitem__(self, name):
return self.values[name]
def __setitem__(self, name, value):
self.values[name] = value
def __delitem__(self, name):
del self.values[name | ]
def __iter__(self):
return iter(self.values)
class PatchTest(unittest.TestCase):
def assertNotCallable(self, obj, magic=True):
MockClass = NonCallableMagicMock
if not magic:
MockClass = NonCallableMock
self.assertRaises(TypeError, obj)
self.assertTrue(is_instance(obj, MockClass))
self.assertFalse(is_instance(obj, CallableMixin))
def test_single_patchobject(self):
class Something(object):
| attribute = sentinel.Original
@patch.object(Something, 'attribute', sentinel.Patched)
def test():
self.assertEqual(Something.attribute, sentinel.Patched, "unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_patchobject_with_none(self):
class Something(object):
attribute = sentinel.Original
@patch.object(Something, 'attribute', None)
def test():
self.assertIsNone(Something.attribute, "unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_multiple_patchobject(self):
class Something(object):
attribute = sentinel.Original
next_attribute = sentinel.Original2
@patch.object(Something, 'attribute', sentinel.Patched)
@patch.object(Something, 'next_attribute', sentinel.Patched2)
def test():
self.assertEqual(Something.attribute, sentinel.Patched,
"unpatched")
self.assertEqual(Something.next_attribute, sentinel.Patched2,
"unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
self.assertEqual(Something.next_attribute, sentinel.Original2,
"patch not restored")
def test_object_lookup_is_quite_lazy(self):
global something
original = something
@patch('%s.something' % __name__, sentinel.Something2)
def test():
pass
try:
something = sentinel.replacement_value
test()
self.assertEqual(something, sentinel.replacement_value)
finally:
something = original
def test_patch(self):
@patch('%s.something' % __name__, sentinel.Something2)
def test():
self.assertEqual(PTModule.something, sentinel.Something2,
"unpatched")
test()
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
@patch('%s.something' % __name__, sentinel.Something2)
@patch('%s.something_else' % __name__, sentinel.SomethingElse)
def test():
self.assertEqual(PTModule.something, sentinel.Something2,
"unpatched")
self.assertEqual(PTModule.something_else, sentinel.SomethingElse,
"unpatched")
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
self.assertEqual(PTModule.something_else, sentinel.SomethingElse,
"patch not restored")
# Test the patching and restoring works a second time
test()
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
self.assertEqual(PTModule.something_else, sentinel.SomethingElse,
"patch not restored")
mock = Mock()
mock.return_value = sentinel.Handle
@patch('%s.open' % builtin_string, mock)
def test():
self.assertEqual(open('filename', 'r'), sentinel.Handle,
"open not patched")
test()
test()
self.assertNotEqual(open, mock, "patch not restored")
def test_patch_class_attribute(self):
@patch('%s.SomeClass.class_attribute' % __name__,
sentinel.ClassAttribute)
def test():
self.assertEqual(PTModule.SomeClass.class_attribute,
sentinel.ClassAttribute, "unpatched")
test()
self.assertIsNone(PTModule.SomeClass.class_attribute,
"patch not restored")
def test_patchobject_with_default_mock(self):
class Test(object):
something = sentinel.Original
something2 = sentinel.Original2
@patch.object(Test, 'something')
def test(mock):
self.assertEqual(mock, Test.something,
"Mock not passed into test function")
self.assertIsInstance(mock, MagicMock,
"patch with two arguments did not create a mock")
test()
@patch.object(Test, 'something')
@patch.object(Test, 'something2')
def test(this1, this2, mock1, mock2):
self.assertEqual(this1, sentinel.this1,
"Patched function didn't receive initial argument")
self.assertEqual(this2, sentinel.this2,
"Patched function didn't receive second argument")
self.assertEqual(mock1, Test.something2,
"Mock not passed into test function")
self.assertEqual(mock2, Test.something,
"Second Mock not passed into test function")
self.assertIsInstance(mock2, MagicMock,
"patch with two arguments did not create a mock")
self.assertIsInstance(mock2, MagicMock,
"patch with two arguments did not create a mock")
# A hack to test that new mocks are passed the second time
self.assertNotEqual(outerMock1, mock1, "unexpected value for mock1")
self.assertNotEqual(outerMock2, mock2, "unexpected value for mock1")
return mock1, mock2
outerMock1 = outerMock2 = None
outerMock1, outerMock2 = test(sentinel.this1, sentinel.this2)
# Test that executing a second time creates new mocks
test(sentinel.this1, sentinel.this2)
def test_patch_with_spec(self):
@patch('%s.SomeClass' % __name__, spec=SomeClass)
def test(MockSomeClass):
self. |
simonluijk/django-blog | setup.py | Python | bsd-3-clause | 2,546 | 0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
import re
import os
import sys
def get_version(package):
"""
Return package version as listed in `__ | version__` in `init.py`. |
"""
init_py = open(os.path.join(package, '__init__.py')).read()
return re.match("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
def get_packages(package):
"""
Return root package and all sub-packages.
"""
return [dirpath
for dirpath, dirnames, filenames in os.walk(package)
if os.path.exists(os.path.join(dirpath, '__init__.py'))]
def get_package_data(package):
"""
Return all files under the root package, that are not in a
package themselves.
"""
walk = [(dirpath.replace(package + os.sep, '', 1), filenames)
for dirpath, dirnames, filenames in os.walk(package)
if not os.path.exists(os.path.join(dirpath, '__init__.py'))]
filepaths = []
for base, filenames in walk:
filepaths.extend([os.path.join(base, filename)
for filename in filenames])
return {package: filepaths}
version = get_version('blog')
if sys.argv[-1] == 'publish':
os.system("python setup.py sdist upload")
print("You probably want to also tag the version now:")
print(" git tag -a %s -m 'version %s'" % (version, version))
print(" git push --tags")
sys.exit()
setup(
name='django-blog-sl',
version=version,
url='http://github.com/simonluijk/django-blog',
license='BSD',
description='Yet another django blog.',
author='Simon Luijk',
author_email='simon@simonluijk.com', # SEE NOTE BELOW (*)
packages=get_packages('blog'),
package_data=get_package_data('blog'),
test_suite='blog.runtests.runtests.main',
install_requires=['django-mptt', 'django-markdown-deux'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Internet :: WWW/HTTP',
]
)
# (*) Please direct queries to the discussion group, rather than to me directly
# Doing so helps ensure your question is helpful to other users.
# Queries directly to my email are likely to receive a canned response.
#
# Many thanks for your understanding.
|
ghofranehr/foobar | manage.py | Python | bsd-3-clause | 1,092 | 0.002747 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import subprocess
from flask.ext.script import Manager, Shell, Server
from flask.ext.migrate import MigrateCommand
from foobar.app import create_app
from foobar.user.models import User
from foobar.settings import DevConfig, ProdConfig
from foobar.database import db
if os.environ.get("FOOBAR_ENV") == 'prod':
app = create_app(ProdConfig)
else:
app = create_app(DevConfig)
HERE = os.path.abspath(os.path.dirname(__file__))
TEST_PATH = os.path.join(HERE, 'tests')
manager = Manager(app)
def _make_context():
"""Return context dict for a shell session so you can access
app, db, and the User model by default.
"""
return {'app': app, 'db': db, 'User': User}
@manager.command
def t | est():
"""Run the tests."""
import pytest
exit_code = pytest.main([TEST_PATH, '--verbose'])
return exit_code
manager.add_command('server', Server())
manager.add_command('shell', Shell(make_context=_make_context))
manager.add_command('db', M | igrateCommand)
if __name__ == '__main__':
manager.run()
|
davidedmundson/telepathy-hangups | telepathy/interfaces.py | Python | lgpl-2.1 | 1,435 | 0.000697 | # telepathy-python - Base classes defining the interfaces of the Telepathy framework
#
# Copyright (C) 2005, 2006 Collabora Limited
# Copyright (C) 2005, 2006 Nokia Corporation
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from telepathy._generated.interfaces import *
# Backwards compatibility
CONN_MGR_INTERFACE = CONNECTION_MANAGER
CO | NN_INTERFACE = CONNECTION
CHANNEL_INTERFACE = CHANNEL
CHANNEL_HANDLER_INTERFACE = CHANNEL_HANDLER
# More backwards compatibility
CONN_INTERFACE_ALIASING = CONNECTION_INTERFACE_ALIASING
CONN_INTERFACE_AVATARS = CONNECTION_INTERFACE_AV | ATARS
CONN_INTERFACE_CAPABILITIES = CONNECTION_INTERFACE_CAPABILITIES
CONN_INTERFACE_PRESENCE = CONNECTION_INTERFACE_PRESENCE
CONN_INTERFACE_RENAMING = CONNECTION_INTERFACE_RENAMING
|
UMDIEEE/ieee-web | migrations/versions/5ddd839de57c_.py | Python | gpl-3.0 | 312 | 0.00641 | """empty message
Revision ID: 5ddd839de57c
Revises: 0b1bd71fb58d
Create Date: 2016-02-22 07:40:21.142754
"""
# revision identifiers, used by Alembic.
revision = '5ddd839de57c'
down_revision = '0b1bd71fb58d'
from alembic import op
import sqlalchemy as sa
def upgrade():
pass
def downgrade():
pas | s
| |
Arno-Nymous/pyload | module/plugins/internal/Crypter.py | Python | gpl-3.0 | 3,435 | 0.001164 | # -*- coding: utf-8 -*-
from .Base import Base
from .misc import parse_name, safename
class Crypter(Base):
__name__ = "Crypter"
__type__ = "crypter"
__version__ = "0.20"
__status__ = "stable"
__pattern__ = r'^unmatchable$'
__config__ = [("activated", "bool", "Activated", True),
("use_premium", "bool", "Use premium account if available", True),
("folder_per_package", "Default;Yes;No", "Create folder for each package", "Default")]
__description__ = """Base decrypter plugin"""
__license__ = "GPLv3"
__authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
def init_base(self):
#: Put all packages here. It's a list of tuples like: ( name, [list of links], folder )
self.packages = []
self.links = [] #: List of urls, pyLoad will generate packagenames
def setup_base(self):
self.packages = []
self.links = []
def process(self, pyfile):
self.decrypt(pyfile)
if self.links:
self._generate_packages()
elif not self.packages:
self.error(_("No link grabbed"), "decrypt")
self._create_packages()
def decrypt(self, pyfile):
"""
The "main" method of every crypter plugin, you **have to** overwrite it
"""
raise NotImplementedError
def _generate_packages(self):
"""
Generate new packages from self.links
"""
name = self.info['pattern'].get("N")
if name is None:
links = map(self.fixurl, self.links)
pdict = self.pyload.api.generatePackages(links)
packages = [(_name, _links, parse_name(_name))
for _name, _links in pdict.items()]
else:
packages = [(name, self.links, parse_name(name))]
self.packages.extend(packages)
| def _create_packages(self):
"""
Create new packages from self.packages
"""
pack_folder = self.pyfile.package().folder
pack_password = self.pyfile.package().password
pack_queue = self.pyfile.packag | e().queue
folder_per_package = self.config.get('folder_per_package', "Default")
if folder_per_package == "Default":
folder_per_package = self.pyload.config.get(
'general', 'folder_per_package')
else:
folder_per_package = folder_per_package == "Yes"
for name, links, folder in self.packages:
self.log_info(_("Create package: %s") % name,
_("%d links") % len(links))
links = map(self.fixurl, links)
self.log_debug("LINKS for package " + name, *links)
pid = self.pyload.api.addPackage(name, links, pack_queue)
if pack_password:
self.pyload.api.setPackageData(
pid, {'password': pack_password})
#: Workaround to do not break API addPackage method
set_folder = lambda x: self.pyload.api.setPackageData(
pid, {'folder': safename(x or "")})
if not folder_per_package:
folder = pack_folder
elif not folder or folder == name:
folder = parse_name(name)
self.log_info(_("Save package `%(name)s` to folder: %(folder)s")
% {'name': name, 'folder': folder})
set_folder(folder)
|
jcass77/mopidy | tests/audio/test_actor.py | Python | apache-2.0 | 22,260 | 0.00009 | import threading
import unittest
from unittest import mock
import pykka
from mopidy import audio
from mopidy.audio.constants import PlaybackState
from mopidy.internal import path
from mopidy.internal.gi import Gst
from tests import dummy_audio, path_to_data_dir
# We want to make sure both our real audio class and the fake one behave
# correctly. So each test is first run against the real class, then repeated
# against our dummy.
class BaseTest(unittest.TestCase):
uris = [
path.path_to_uri(path_to_data_dir("song1.wav")),
path.path_to_uri(path_to_data_dir("song2.wav")),
]
audio_class = audio.Audio
def setUp(self): # noqa: N802
config = {
"audio": {
"buffer_time": None,
"mixer": "foomixer",
"mixer_volume": None,
"output": "testoutput",
"visualizer": None,
},
"proxy": {"hostname": ""},
}
self.song_uri = path.path_to_uri(path_to_data_dir("song1.wav"))
self.audio = self.audio_class.start(config=config, mixer=None).proxy()
def tearDown(self): # noqa
pykka.ActorRegistry.stop_all()
def possibly_trigger_fake_playback_error(self, uri):
pass
def possibly_trigger_fake_about_to_finish(self):
pass
class DummyMixin:
audio_class = dummy_audio.DummyAudio
def possibly_trigger_fake_playback_error(self, uri):
self.audio.trigger_fake_playback_failure(uri)
def possibly_trigger_fake_about_to_finish(self):
callback = self.audio.get_about_to_finish_callback().get()
if callback:
callback()
class AudioTest(BaseTest):
def test_start_playback_existing_file(self):
self.audio.prepare_change()
self.audio.set_uri(self.uris[0])
assert self.audio.start_playback().get()
def test_start_playback_non_existing_file(self):
self.possibly_trigger_fake_playback_error(self.uris[0] + "bogus")
self.audio.prepare_change()
self.audio.set_uri(self.uris[0] + "bogus")
assert not self.audio.start_playback().get()
def test_pause_playback_while_playing(self):
self.audio.prepare_change()
self.audio.set_uri(self.uris[0])
self.audio.start_playback()
assert self.audio.pause_playback().get()
def test_stop_playback_while_playing(self):
self.audio.prepare_change()
self.audio.set_uri(self.uris[0])
self.audio.start_playback()
assert self.audio.stop_playback().get()
@unittest.SkipTest
def test_deliver_data(self):
pass # TODO
@unittest.SkipTest
def test_end_of_data_stream(self):
pass # TODO
@unittest.SkipTest
def test_set_mute(self):
pass # TODO Probably needs a fakemixer with a mixer track
@unittest.SkipTest
def test_set_state_encapsulation(self):
pass # TODO
@unittest.SkipTest
def test_set_position(self):
pass # TODO
@unittest.SkipTest
def test_invalid_output_raises_error(self):
pass # TODO
class AudioDummyTest(DummyMixin, AudioTest):
pass
class DummyAudioListener(pykka.ThreadingActor, audio.AudioListener):
def __init__(self):
super().__init__()
self.events = []
self.waiters = {}
def on_event(self, event, **kwargs):
self.events.append((event, kwargs))
if event in self.waiters:
self.waiters[event].set()
def wait(self, event):
self.waiters[event] = threading.Event()
return self.waiters[event]
def get_events(self):
return self.events
def clear_events(self):
self.events = [ | ]
class AudioEventTest(BaseTest):
def setUp(self): # noqa: N802
super().setUp()
self.audio.enable_sync_handler().get()
self.listener = DummyAudioListener.start().proxy()
def tearDown(self): # noqa: N802
super().tearDown()
def assertEvent(self, event, **kwargs): # noqa: N802
assert (event, kwargs) in self.listener.get_events().get()
def assertNotEvent(self, event, **kwargs): # noqa: N802
ass | ert (event, kwargs) not in self.listener.get_events().get()
# TODO: test without uri set, with bad uri and gapless...
# TODO: playing->playing triggered by seek should be removed
# TODO: codify expected state after EOS
# TODO: consider returning a future or a threading event?
def test_state_change_stopped_to_playing_event(self):
self.audio.prepare_change()
self.audio.set_uri(self.uris[0])
self.audio.start_playback()
self.audio.wait_for_state_change().get()
self.assertEvent(
"state_changed",
old_state=PlaybackState.STOPPED,
new_state=PlaybackState.PLAYING,
target_state=None,
)
def test_state_change_stopped_to_paused_event(self):
self.audio.prepare_change()
self.audio.set_uri(self.uris[0])
self.audio.pause_playback()
self.audio.wait_for_state_change().get()
self.assertEvent(
"state_changed",
old_state=PlaybackState.STOPPED,
new_state=PlaybackState.PAUSED,
target_state=None,
)
def test_state_change_paused_to_playing_event(self):
self.audio.prepare_change()
self.audio.set_uri(self.uris[0])
self.audio.pause_playback()
self.audio.wait_for_state_change()
self.listener.clear_events()
self.audio.start_playback()
self.audio.wait_for_state_change().get()
self.assertEvent(
"state_changed",
old_state=PlaybackState.PAUSED,
new_state=PlaybackState.PLAYING,
target_state=None,
)
def test_state_change_paused_to_stopped_event(self):
self.audio.prepare_change()
self.audio.set_uri(self.uris[0])
self.audio.pause_playback()
self.audio.wait_for_state_change()
self.listener.clear_events()
self.audio.stop_playback()
self.audio.wait_for_state_change().get()
self.assertEvent(
"state_changed",
old_state=PlaybackState.PAUSED,
new_state=PlaybackState.STOPPED,
target_state=None,
)
def test_state_change_playing_to_paused_event(self):
self.audio.prepare_change()
self.audio.set_uri(self.uris[0])
self.audio.start_playback()
self.audio.wait_for_state_change()
self.listener.clear_events()
self.audio.pause_playback()
self.audio.wait_for_state_change().get()
self.assertEvent(
"state_changed",
old_state=PlaybackState.PLAYING,
new_state=PlaybackState.PAUSED,
target_state=None,
)
def test_state_change_playing_to_stopped_event(self):
self.audio.prepare_change()
self.audio.set_uri(self.uris[0])
self.audio.start_playback()
self.audio.wait_for_state_change()
self.listener.clear_events()
self.audio.stop_playback()
self.audio.wait_for_state_change().get()
self.assertEvent(
"state_changed",
old_state=PlaybackState.PLAYING,
new_state=PlaybackState.STOPPED,
target_state=None,
)
def test_stream_changed_event_on_playing(self):
self.audio.prepare_change()
self.audio.set_uri(self.uris[0])
self.listener.clear_events()
self.audio.start_playback()
# Since we are going from stopped to playing, the state change is
# enough to ensure the stream changed.
self.audio.wait_for_state_change().get()
self.assertEvent("stream_changed", uri=self.uris[0])
def test_stream_changed_event_on_multiple_changes(self):
self.audio.prepare_change()
self.audio.set_uri(self.uris[0])
self.listener.clear_events()
self.audio.start_playback()
self.audio.wait_for_state_change().get()
self.assertEvent("stream_changed", uri=self.uris[0])
self.audio.prepare_change()
sel |
ucb-sejits/ast_tool_box | test/sample.py | Python | mit | 101 | 0.009901 | impor | t ast
|
def square_it(x):
return x * x
class Transformer1(ast.NodeTransformer):
pass
|
aoyono/sicpy | Chapter2/exercises/exercise2_60.py | Python | mit | 3,259 | 0.002762 | # -*- coding: utf-8 -*-
"""
https://mitpress.mit.edu/sicp/full-text/book/book-Z-H-16.html#%_thm_2.60
"""
from Chapter2.themes.lisp_list_structured_data import car, cdr, cons, lisp_list, nil, print_lisp_list
from Chapter2.themes.sequences_as_conventional_interfaces import accumulate
def element_of_set(x, set):
"""Tests if x is element of set with a representation of sets that allows duplicates"""
if set is nil:
return False
if x == car(set):
return True
return element_of_set(x, cdr(set))
def adjoin_set(x, set):
"""Adds x to set"""
return cons(x, set)
def union_set(set1, set2):
"""Computes union of set1 and set2"""
return accumulate(adjoin_set, set2, set1)
def intersection_set(set1, set2):
"""Compu | tes intersection of set1 and set2"""
if set1 is nil or set2 is nil:
return nil
if element_of_set(car(set1), set2):
return cons(car(set1), intersection_set(cdr(set1), set2))
return intersection_set(cdr(set1), set2)
def run_the_magic():
s1 = lisp_list(2, 3, 2, 1, 3, 2, 2)
s2 = lisp_list(1, 1, 3)
s3 = lisp_list(1, 2, 3)
print(element_of_set(3, s1))
print_lisp_list(adjoin_set(4, s1))
print_lisp_list(intersec | tion_set(s1, s2))
print_lisp_list(union_set(s1, s2))
from timeit import Timer
t1_element_of = Timer(stmt='element_of_set(3, %(s1)s)' % locals(),
setup='from Chapter2.exercise2_60 import element_of_set')
t2_element_of = Timer(stmt='element_of_set(3, %(s1)s)' % locals(),
setup='from Chapter2.sets_as_unordered_lists import element_of_set')
t1_adjoin = Timer(stmt='adjoin_set(4, %(s1)s)' % locals(), setup='from Chapter2.exercise2_60 import adjoin_set')
t2_adjoin = Timer(stmt='adjoin_set(4, %(s3)s)' % locals(),
setup='from Chapter2.sets_as_unordered_lists import adjoin_set')
t1_intersection = Timer(stmt='intersection_set(%(s1)s, %(s2)s)' % locals(),
setup='from Chapter2.exercise2_60 import intersection_set')
t2_intersection = Timer(stmt='intersection_set(%(s1)s, %(s3)s)' % locals(),
setup='from Chapter2.sets_as_unordered_lists import intersection_set')
t1_union = Timer(stmt='union_set(%(s1)s, %(s2)s)' % locals(),
setup='from Chapter2.exercise2_60 import union_set')
t2_union = Timer(stmt='union_set(%(s1)s, %(s2)s)' % locals(),
setup='from Chapter2.exercise2_59 import union_set')
header = '-----------Timing for *%s* operation'
def do_timing(timer1, timer2, op_name):
print(header % op_name)
t1 = timer1.timeit()
t2 = timer2.timeit()
print('-> With duplicate: %s' % t1)
print('-> Without duplicate: %s' % t2)
do_timing(t1_element_of, t2_element_of, 'element_of_set')
do_timing(t1_adjoin, t2_adjoin, 'adjoin_set')
do_timing(t2_intersection, t2_intersection, 'intersection_set')
do_timing(t1_union, t2_union, 'union_set')
print('The representation using unordered list with duplicates is better suited for applications where there are '
'many insertions in the data structure')
if __name__ == "__main__":
run_the_magic()
|
chriskiehl/Gooey | gooey/gui/components/widgets/bases.py | Python | mit | 9,873 | 0.001823 | import re
from functools import reduce
from typing import Optional, Callable, Any, Type, Union
import wx # type: ignore
from gooey.gui import formatters, events
from gooey.gui.util import wx_util
from gooey.python_bindings.types import FormField
from gooey.util.functional import getin, ifPresent
from gooey.gui.validators import runValidator
from gooey.gui.components.util.wrapped_static_text import AutoWrappedStaticText
from gooey.gui.components.mouse import notifyMouseEvent
from gooey.python_bindings import types as t
class BaseWidget(wx.Panel):
widget_class: Any
def arrange(self, label, text):
raise NotImplementedError
def getWidget(self, parent: wx.Window, **options):
return self.widget_class(parent, **options)
def connectSignal(self):
raise NotImplementedError
def getSublayout(self, *args, **kwargs):
raise NotImplementedError
def setValue(self, value):
raise NotImplementedError
def setPlaceholder(self, value):
raise NotImplementedError
def receiveChange(self, *args, **kwargs):
raise NotImplementedError
def dispatchChange(self, value, **kwargs):
raise NotImplementedError
def formatOutput(self, metatdata, value):
raise NotImplementedError
class TextContainer(BaseWidget):
# TODO: fix this busted-ass inheritance hierarchy.
# Cracking at the seems for more advanced widgets
# problems:
# - all the usual textbook problems of inheritance
# - assumes there will only ever be ONE w | idget created
# - assumes those widgets are all created in `getWidget`
# - all the above make for extremely awkward lifecycle management
# - no clear point at which binding is correct.
# - I think the core problem here is that I couple the interface
# for shared presentation layout with the specification of
# a behavioral interface
# - This should be | broken apart.
# - presentation can be ad-hoc or composed
# - behavioral just needs a typeclass of get/set/format for Gooey's purposes
widget_class = None # type: ignore
def __init__(self, parent, widgetInfo, *args, **kwargs):
super(TextContainer, self).__init__(parent, *args, **kwargs)
self.info = widgetInfo
self._id = widgetInfo['id']
self.widgetInfo = widgetInfo
self._meta = widgetInfo['data']
self._options = widgetInfo['options']
self.label = wx.StaticText(self, label=widgetInfo['data']['display_name'])
self.help_text = AutoWrappedStaticText(self, label=widgetInfo['data']['help'] or '')
self.error = AutoWrappedStaticText(self, label='')
self.error.Hide()
self.widget = self.getWidget(self)
self.layout = self.arrange(*args, **kwargs)
self.setColors()
self.SetSizer(self.layout)
self.bindMouseEvents()
self.Bind(wx.EVT_SIZE, self.onSize)
# 1.0.7 initial_value should supersede default when both are present
if self._options.get('initial_value') is not None:
self.setValue(self._options['initial_value'])
# Checking for None instead of truthiness means False-evaluaded defaults can be used.
elif self._meta['default'] is not None:
self.setValue(self._meta['default'])
if self._options.get('placeholder'):
self.setPlaceholder(self._options.get('placeholder'))
self.onComponentInitialized()
def onComponentInitialized(self):
pass
def bindMouseEvents(self):
"""
Send any LEFT DOWN mouse events to interested
listeners via pubsub. see: gooey.gui.mouse for background.
"""
self.Bind(wx.EVT_LEFT_DOWN, notifyMouseEvent)
self.label.Bind(wx.EVT_LEFT_DOWN, notifyMouseEvent)
self.help_text.Bind(wx.EVT_LEFT_DOWN, notifyMouseEvent)
self.error.Bind(wx.EVT_LEFT_DOWN, notifyMouseEvent)
self.widget.Bind(wx.EVT_LEFT_DOWN, notifyMouseEvent)
def arrange(self, *args, **kwargs):
wx_util.make_bold(self.label)
wx_util.withColor(self.label, self._options['label_color'])
wx_util.withColor(self.help_text, self._options['help_color'])
wx_util.withColor(self.error, self._options['error_color'])
self.help_text.SetMinSize((0,-1))
layout = wx.BoxSizer(wx.VERTICAL)
if self._options.get('show_label', True):
layout.Add(self.label, 0, wx.EXPAND)
else:
self.label.Show(False)
layout.AddStretchSpacer(1)
layout.AddSpacer(2)
if self.help_text and self._options.get('show_help', True):
layout.Add(self.help_text, 1, wx.EXPAND)
layout.AddSpacer(2)
else:
self.help_text.Show(False)
layout.AddStretchSpacer(1)
layout.Add(self.getSublayout(), 0, wx.EXPAND)
layout.Add(self.error, 1, wx.EXPAND)
# self.error.SetLabel("HELLOOOOO??")
# self.error.Show()
# print(self.error.Shown)
return layout
def setColors(self):
wx_util.make_bold(self.label)
wx_util.withColor(self.label, self._options['label_color'])
wx_util.withColor(self.help_text, self._options['help_color'])
wx_util.withColor(self.error, self._options['error_color'])
if self._options.get('label_bg_color'):
self.label.SetBackgroundColour(self._options.get('label_bg_color'))
if self._options.get('help_bg_color'):
self.help_text.SetBackgroundColour(self._options.get('help_bg_color'))
if self._options.get('error_bg_color'):
self.error.SetBackgroundColour(self._options.get('error_bg_color'))
def getWidget(self, *args, **options):
return self.widget_class(*args, **options)
def getWidgetValue(self):
raise NotImplementedError
def getSublayout(self, *args, **kwargs):
layout = wx.BoxSizer(wx.HORIZONTAL)
layout.Add(self.widget, 1, wx.EXPAND)
return layout
def onSize(self, event):
# print(self.GetSize())
# self.error.Wrap(self.GetSize().width)
# self.help_text.Wrap(500)
# self.Layout()
event.Skip()
def getUiState(self) -> t.FormField:
return t.TextField(
id=self._id,
type=self.widgetInfo['type'],
value=self.getWidgetValue(),
placeholder=self.widget.widget.GetHint(),
error=self.error.GetLabel().replace('\n', ' '),
enabled=self.IsEnabled(),
visible=self.IsShown()
)
def syncUiState(self, state: FormField): # type: ignore
self.widget.setValue(state['value']) # type: ignore
self.error.SetLabel(state['error'] or '')
self.error.Show(state['error'] is not None and state['error'] is not '')
def getValue(self) -> t.FieldValue:
regexFunc: Callable[[str], bool] = lambda x: bool(re.match(userValidator, x))
userValidator = getin(self._options, ['validator', 'test'], 'True')
message = getin(self._options, ['validator', 'message'], '')
testFunc = regexFunc \
if getin(self._options, ['validator', 'type'], None) == 'RegexValidator'\
else eval('lambda user_input: bool(%s)' % userValidator)
satisfies = testFunc if self._meta['required'] else ifPresent(testFunc)
value = self.getWidgetValue()
return t.FieldValue( # type: ignore
id=self._id,
cmd=self.formatOutput(self._meta, value),
meta=self._meta,
rawValue= value,
# type=self.info['type'],
enabled=self.IsEnabled(),
visible=self.IsShown(),
test= runValidator(satisfies, value),
error=None if runValidator(satisfies, value) else message,
clitype=('positional'
if self._meta['required'] |
KlubJagiellonski/Politikon | events/tests.py | Python | gpl-2.0 | 41,180 | 0.001142 | # -*- coding: utf-8 -*-
"""
Test events module
"""
from datetime import timedelta
from freezegun import freeze_time
import json
from django.contrib.auth.models import AnonymousUser
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.utils import override_settings
from django.utils import timezone
from django.utils.translation import ugettext as _
from .exceptions import NonexistantEvent, PriceMismatch, EventNotInProgress, UnknownOutcome, \
InsufficientCash, InsufficientBets
from .factories import EventFactory, ShortEventFactory, BetFactory, TransactionFactory
from .models import Bet, Event, Transaction
from .tasks import create_open_events_snapshot, calculate_price_change
from .templatetags.display import render_bet, render_event, render_events, render_featured_event, \
render_featured_events, render_bet_status, outcome, render_finish_date, og_title
from accounts.factories import UserFactory
from accounts.models import UserProfile
from constance import config
from politikon.templatetags.path import startswith
class EventsModelTestCase(TestCase):
"""
Test methods for event
"""
def test_event_creation(self):
"""
Create event with minimal attributes
"""
event = ShortEventFactory()
self.assertIsInstance(event, Event)
def test_event_with_attributes(self):
"""
Create event with all attributes
"""
event = EventFactory()
self.assertIsInstance(event, Event)
self.assertEqual(u'Długi tytuł testowego wydarzenia', event.title)
self.assertEqual(u'Długi tytuł testowego wydarzenia',
event.__unicode__())
self.assertEqual('/event/1-dlugi-tytul-testowego-wydarzenia',
event.get_relative_url())
# TODO rename to politikon.org.pl
# TODO: FIXME
# self.assertEqual('http://example.com/event/1-a', event.get_absolute_url())
self.assertTrue(event.is_in_progress)
self.assertEqual('event_1', event.publish_channel)
self.assertEqual({
'event_id': 1,
'buy_for_price': 50,
'buy_against_price': 50,
'sell_for_price': 50,
'sell_against_price': 50
}, event.event_dict)
outcome1 = event.price_for_outcome(Bet.YES, Bet.BUY)
self.assertEqual(event.current_buy_for_price, outcome1)
outcome2 = even | t.price_for_outcome(Bet.YES, Bet.SELL)
self.assertEqual(event.current_sell_for_price, outcome2)
outcome3 = event.price_for_outcome(Bet.NO)
self.assertEqual(event.current_buy_against_price, outcome3)
outcome4 = event.price_for_outcome(Bet.NO, Bet.SELL)
self.assertEqual(event.current_sell_against_price, outcome4)
with self.assertRaises(UnknownOutcome):
event.price_for_outcome('OO | OPS', 'MY MISTAKE')
# TODO: FIXME
# teraz nie interpolujemy punktów ;(
# @override_settings(CELERY_EAGER_PROPAGATES_EXCEPTIONS=True,
# CELERY_ALWAYS_EAGER=True,
# BROKER_BACKEND='memory')
# def test_get_chart_points(self):
# """
# Get chart points
# """
# # time of get_chart_points
# initial_time = timezone.now().\
# replace(hour=0, minute=0, second=0, microsecond=0) - timedelta(days=15)
# with freeze_time(initial_time) as frozen_time:
# event1 = EventFactory()
# event1.current_buy_for_price = 90
# event1.save()
#
# create_open_events_snapshot.delay()
# frozen_time.tick(delta=timedelta(days=3))
#
# event1.current_buy_for_price = 30
# event1.save()
# event2 = EventFactory()
# event2.current_buy_for_price = 30
# event2.save()
#
# create_open_events_snapshot.delay()
# frozen_time.tick(delta=timedelta(days=5))
#
# event1.current_buy_for_price = 60
# event1.save()
# event2.current_buy_for_price = 60
# event2.save()
# event3 = EventFactory()
#
# create_open_events_snapshot.delay()
# frozen_time.tick(delta=timedelta(days=2))
#
# event1.current_buy_for_price = 55
# event1.save()
# event2.current_buy_for_price = 55
# event2.save()
# event3.current_buy_for_price = 55
# event3.save()
#
# create_open_events_snapshot.delay()
# frozen_time.tick(delta=timedelta(days=2))
#
# event1.current_buy_for_price = 82
# event1.save()
# event2.current_buy_for_price = 82
# event2.save()
# event3.current_buy_for_price = 82
# event3.save()
#
# create_open_events_snapshot.delay()
# frozen_time.tick(delta=timedelta(days=2))
# event3.finish_yes()
# event3.save()
#
# # no snapshot now
# frozen_time.tick(delta=timedelta(days=1))
# event1.current_buy_for_price = 0
# event1.save()
# event2.current_buy_for_price = 0
# event2.save()
#
# create_open_events_snapshot.delay()
#
# # time of caculate_price_change task
# # TODO: do this better
# short_range = Event.EVENT_SMALL_CHART_DAYS
# first_date = timezone.now() - timedelta(days=short_range-1)
# days = [first_date + timedelta(n) for n in range(short_range)]
# labels = [
# u'{0} {1}'.format(step_date.day, _(step_date.strftime('%B'))) for step_date in days
# ]
#
# long_range = Event.EVENT_BIG_CHART_DAYS
# first_date2 = timezone.now() - timedelta(days=long_range-1)
# days2 = [first_date2 + timedelta(n) for n in range(long_range)]
# labels2 = [
# u'{0} {1}'.format(step_date.day, _(step_date.strftime('%B'))) for step_date in days2
# ]
#
# margin = [Event.BEGIN_PRICE] * Event.CHART_MARGIN
# mlen = len(margin)
# points1 = [90, 90, 90, 30, 30, 30, 30, 30, 60, 60, 55, 55, 82, 82, 82, 0]
# points2 = [30, 30, 30, 30, 30, 60, 60, 55, 55, 82, 82, 82, 0]
# points3 = [Event.BEGIN_PRICE, Event.BEGIN_PRICE, 55, 55, 82, 82, 82]
# self.assertEqual({
# 'id': 1,
# 'labels': labels,
# 'points': points1[2:]
# }, event1.get_event_small_chart())
# self.assertEqual({
# 'id': 1,
# # labels 3 ends one day earlier
# 'labels': labels2[long_range-mlen-len(points1):],
# 'points': margin + points1
# }, event1.get_event_big_chart())
# self.assertEqual({
# 'id': 2,
# 'labels': labels,
# 'points': [Event.BEGIN_PRICE] + points2
# }, event2.get_event_small_chart())
# self.assertEqual({
# 'id': 2,
# 'labels': labels2[long_range-mlen-len(points2):],
# 'points': margin + points2
# }, event2.get_event_big_chart())
# self.assertEqual({
# 'id': 3,
# # labels 3 ends one day earlier
# 'labels': labels[short_range-1-mlen-len(points3):short_range-1],
# 'points': margin + points3
# }, event3.get_event_small_chart())
# self.assertEqual({
# 'id': 3,
# # labels 3 ends one day earlier
# 'labels': labels2[long_range-1-mlen-len(points3):long_range-1],
# 'points': margin + points3
# }, event3.get_event_big_chart())
def test_get_bet_social(self):
"""
Get bet social
"""
event = EventFactory()
users_yes = UserFactory.create_batch(10)
users_no = UserFactory.create_batch(10)
bets_yes = [BetFactory(user=u, event=event) for u in users_yes]
bets_no = [BetFactory(user=u, event=event, outcome=Bet.NO) for u in users_no]
self.maxDiff = None
social = event.get_bet_s |
shakamunyi/neutron-vrrp | neutron/neutron_plugin_base_v2.py | Python | apache-2.0 | 14,707 | 0 | # Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
v2 Neutron Plug-in API specification.
:class:`NeutronPluginBaseV2` provides the definition of minimum set of
methods that needs to be implemented by a v2 Neutron Plug-in.
"""
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class NeutronPluginBaseV2(object):
    @abc.abstractmethod
    def create_subnet(self, context, subnet):
        """Create a subnet.
        Create a subnet, which represents a range of IP addresses
        that can be allocated to devices.
        :param context: neutron api request context
        :param subnet: dictionary describing the subnet, with keys
                       as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object
                       in :file:`neutron/api/v2/attributes.py`. All keys will
                       be populated.
        """
        pass
    @abc.abstractmethod
    def update_subnet(self, context, id, subnet):
        """Update values of a subnet.
        :param context: neutron api request context
        :param id: UUID representing the subnet to update.
        :param subnet: dictionary with keys indicating fields to update.
                       Valid keys are those that have a value of True for
                       'allow_put' as listed in the
                       :obj:`RESOURCE_ATTRIBUTE_MAP` object in
                       :file:`neutron/api/v2/attributes.py`.
        """
        pass
    @abc.abstractmethod
    def get_subnet(self, context, id, fields=None):
        """Retrieve a single subnet.
        :param context: neutron api request context
        :param id: UUID representing the subnet to fetch.
        :param fields: a list of strings that are valid keys in a
                       subnet dictionary as listed in the
                       :obj:`RESOURCE_ATTRIBUTE_MAP` object in
                       :file:`neutron/api/v2/attributes.py`. Only these fields
                       will be returned.
        """
        pass
    @abc.abstractmethod
    def get_subnets(self, context, filters=None, fields=None,
                    sorts=None, limit=None, marker=None, page_reverse=False):
        """Retrieve a list of subnets.
        The contents of the list depends on
        the identity of the user making the request (as indicated by the
        context) as well as any filters.
        :param context: neutron api request context
        :param filters: a dictionary with keys that are valid keys for
                        a subnet as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP`
                        object in :file:`neutron/api/v2/attributes.py`.
                        Values in this dictionary are an iterable containing
                        values that will be used for an exact match comparison
                        for that value. Each result returned by this
                        function will have matched one of the values for each
                        key in filters.
        :param fields: a list of strings that are valid keys in a
                      subnet dictionary as listed in the
                      :obj:`RESOURCE_ATTRIBUTE_MAP` object in
                      :file:`neutron/api/v2/attributes.py`. Only these fields
                      will be returned.
        """
        pass
    def get_subnets_count(self, context, filters=None):
        """Return the number of subnets.
        The result depends on the identity of
        the user making the request (as indicated by the context) as well as
        any filters.
        :param context: neutron api request context
        :param filters: a dictionary with keys that are valid keys for
                        a network as listed in the
                        :obj:`RESOURCE_ATTRIBUTE_MAP` object in
                        :file:`neutron/api/v2/attributes.py`. Values in this
                        dictionary are an iterable containing values that
                        will be used for an exact match comparison for that
                        value. Each result returned by this function will
                        have matched one of the values for each key in filters.
        .. note:: this method is optional, as it was not part of the originally
                  defined plugin API.
        """
        raise NotImplementedError
    @abc.abstractmethod
    def delete_subnet(self, context, id):
        """Delete a subnet.
        :param context: neutron api request context
        :param id: UUID representing the subnet to delete.
        """
        pass
@abc.abstractmethod
def create_network(self, context, netw | ork):
"""Create a network.
Create a network, which represents an L2 network segment which
can have a set of subnets and ports associated with it.
:param context: neutron api request context
:param network: dictionary describing the network, with keys
as listed in the :obj:`RESOURCE_ATTRIBUTE_M | AP` object
in :file:`neutron/api/v2/attributes.py`. All keys will
be populated.
"""
pass
    @abc.abstractmethod
    def update_network(self, context, id, network):
        """Update values of a network.
        :param context: neutron api request context
        :param id: UUID representing the network to update.
        :param network: dictionary with keys indicating fields to update.
                        Valid keys are those that have a value of True for
                        'allow_put' as listed in the
                        :obj:`RESOURCE_ATTRIBUTE_MAP` object in
                        :file:`neutron/api/v2/attributes.py`.
        """
        pass
    @abc.abstractmethod
    def get_network(self, context, id, fields=None):
        """Retrieve a single network.
        :param context: neutron api request context
        :param id: UUID representing the network to fetch.
        :param fields: a list of strings that are valid keys in a
                       network dictionary as listed in the
                       :obj:`RESOURCE_ATTRIBUTE_MAP` object in
                       :file:`neutron/api/v2/attributes.py`. Only these fields
                       will be returned.
        """
        pass
@abc.abstractmethod
def get_networks(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None, page_reverse=False):
"""Retrieve a list of networks.
The contents of the list depends on
the identity of the user making the request (as indicated by the
context) as well as any filters.
:param context: neutron api request context
:param filters: a dictionary with keys that are valid keys for
a network as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Values in this
dictiontary are an iterable containing values that will
be used for an exact match comparison for that value.
Each result returned by this function will have matched
one of the values for each key in filters.
:param fields: a list of strings that are valid keys in a
network dictionary as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Only these fields
will be returned.
"""
|
JessicaGarson/MovieSentiment | baggedmovies.py | Python | unlicense | 1,258 | 0.011129 | #bagging to make it better
import pandas as pd
import numpy as np
from ggplot import *
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import Logis | ticRegression
from sklearn.cross_validation import cross_val_score
from sklearn.metrics import auc_score
train = pd.read_csv('/Users/jessicagarson/Downloads/Movie Reviews/train.csv')
test = pd.read_csv('/Users/jessicagarson/Downloads/Movie Reviews/test.csv')
def bagmodel(s):
vectorizer = CountVectorizer()
X_dict = vectorizer.fit_transform(s.Phrase)
choices = np.random.choice(range(len(s)), len(s), replace = True)
s = s.ix[choic | es,:]
X_train = vectorizer.transform(s.Phrase)
model = LogisticRegression().fit(X_train, list(s.Sentiment))
return model
models = []
for i in range(5):
print i
models.append(bagmodel(train))
from collections import Counter
def combination(s):
thing = Counter(s)
return thing.most_common(1)[0]
combination([3,3,2,3,3,])
result_final = []
for i in range(len(test)):
a, b = combination([x[i] for x in result])
result_final.append(a)
result_final[0]
solution = pd.DataFrame({'PhraseId': test.PhraseId, 'Sentiment': result_final})
solution.to_csv('submissionbagged.csv', index=False) |
msultan/msmbuilder | msmbuilder/tests/test_vmhmm.py | Python | lgpl-2.1 | 5,110 | 0.000196 | from __future__ import print_function, division
import random
from itertools import permutations
import numpy as np
from scipy.stats.distributions import vonmises
import pickle
import tempfile
from sklearn.pipeline import Pipeline
from msmbuilder.example_datasets import AlanineDipeptide
from msmbuilder.featurizer import DihedralFeaturizer
from msmbuilder.hmm import VonMisesHMM
def test_code_works():
    # creates a 4-state HMM on the ALA2 data. Nothing fancy, just makes
    # sure the code runs without erroring out
    trajectories = AlanineDipeptide().get_cached().trajectories
    featurizer = DihedralFeaturizer(['phi', 'psi'], trajectories[0][0])
    sequences = featurizer.transform(trajectories)

    hmm = VonMisesHMM(n_states=4, n_init=1)
    hmm.fit(sequences)
    # The original `len(hmm.timescales_ == 3)` measured the length of a
    # boolean array, which is always truthy for a fitted model. A 4-state
    # model has 3 implied timescales, so compare the length itself.
    assert len(hmm.timescales_) == 3
    assert np.any(hmm.timescales_ > 50)
def circwrap(x):
    """Wrap *x* (scalar or array) onto the circular interval (-pi, pi]."""
    tau = 2 * np.pi
    return x - tau * np.floor(x / tau + 0.5)
def create_timeseries(means, kappas, transmat, num_frames=1000):
    """Construct a random timeseries based on a specified Markov model.

    Parameters
    ----------
    means, kappas : array-like, one row per state
        Von Mises emission parameters for each hidden state.
    transmat : array-like, (n_states, n_states)
        Row-stochastic transition matrix.
    num_frames : int, optional
        Length of the generated series (default 1000, matching the
        previously hard-coded value).
    """
    numStates = len(means)
    state = random.randint(0, numStates - 1)
    # cumulative distribution over next-state per row, for inverse sampling
    cdf = np.cumsum(transmat, 1)
    X = np.empty((num_frames, 1))
    for i in range(num_frames):
        rand = random.random()
        state = (cdf[state] > rand).argmax()
        X[i, 0] = circwrap(vonmises.rvs(kappas[state], means[state]))
    return X
def validate_timeseries(means, kappas, transmat, model, meantol,
                        kappatol, transmattol):
    """Test our model matches the one used to create the timeseries.

    Compares the fitted ``model`` against the reference parameters under
    every permutation of state labels; raises AssertionError if no
    permutation matches within the given tolerances.
    """
    numStates = len(means)
    assert len(model.means_) == numStates
    assert (model.transmat_ >= 0.0).all()
    assert (model.transmat_ <= 1.0).all()
    # every row of the transition matrix must sum to 1
    totalProbability = sum(model.transmat_.T)
    assert (abs(totalProbability - 1.0) < 1e-5).all()

    # The states may have come out in a different order,
    # so we need to test all possible permutations.
    for order in permutations(range(len(means))):
        match = True
        for i in range(numStates):
            # circular distance for the means, plain distance for kappas
            if abs(circwrap(means[i] - model.means_[order[i]])) > meantol:
                match = False
                break
            if abs(kappas[i] - model.kappas_[order[i]]) > kappatol:
                match = False
                break
            for j in range(numStates):
                diff = transmat[i, j] - model.transmat_[order[i], order[j]]
                if abs(diff) > transmattol:
                    match = False
                    break
        if match:
            # It matches.
            return
    # No permutation matched. Raise explicitly instead of `assert False`,
    # which would be silently stripped under `python -O`.
    raise AssertionError('no state permutation matched the reference model')
def test_2_state():
    """Recover a known 2-state von Mises HMM from synthetic timeseries."""
    transmat = np.array([[0.7, 0.3], [0.4, 0.6]])
    means = np.array([[0.0], [2.0]])
    kappas = np.array([[4.0], [8.0]])
    X = [create_timeseries(means, kappas, transmat) for i in range(10)]

    # For each value of various options,
    # create a 2 state HMM and see if it is correct.
    for reversible_type in ('mle', 'transpose'):
        model = VonMisesHMM(n_states=2, reversible_type=reversible_type,
                            thresh=1e-4, n_iter=30)
        model.fit(X)
        validate_timeseries(means, kappas, transmat, model, 0.1, 0.5, 0.05)
        assert abs(model.fit_logprob_[-1] - model.score(X)) < 0.5
def test_3_state():
    """Recover a known 3-state von Mises HMM from synthetic timeseries."""
    transmat = np.array([[0.2, 0.3, 0.5], [0.4, 0.4, 0.2], [0.8, 0.2, 0.0]])
    means = np.array([[0.0], [2.0], [4.0]])
    kappas = np.array([[8.0], [8.0], [6.0]])
    sequences = [create_timeseries(means, kappas, transmat) for _ in range(20)]

    # Fit one model per supported reversibility scheme and validate each
    # against the generating parameters.
    for rev_type in ('mle', 'transpose'):
        model = VonMisesHMM(n_states=3, reversible_type=rev_type,
                            thresh=1e-4, n_iter=30)
        model.fit(sequences)
        validate_timeseries(means, kappas, transmat, model, 0.1, 0.5, 0.1)
        assert abs(model.fit_logprob_[-1] - model.score(sequences)) < 0.5
def test_pipeline():
    """Fit a featurizer -> HMM sklearn Pipeline end to end on ALA2."""
    trajs = AlanineDipeptide().get_cached().trajectories
    pipeline = Pipeline([
        ('diheds', DihedralFeaturizer(['phi', 'psi'], sincos=False)),
        ('hmm', VonMisesHMM(n_states=4)),
    ])
    predict = pipeline.fit_predict(trajs)
    pipeline.named_steps['hmm'].summarize()
def test_pickle():
    """A fitted HMM must survive a pickle round-trip with identical predictions."""
    trajectories = AlanineDipeptide().get_cached().trajectories
    topology = trajectories[0].topology
    indices = topology.select('symbol C or symbol O or symbol N')
    featurizer = DihedralFeaturizer(['phi', 'psi'], trajectories[0][0])
    sequences = featurizer.transform(trajectories)

    hmm = VonMisesHMM(n_states=4, n_init=1)
    hmm.fit(sequences)
    logprob, hidden = hmm.predict(sequences)

    with tempfile.TemporaryFile() as handle:
        pickle.dump(hmm, handle)
        handle.seek(0, 0)
        restored = pickle.load(handle)
        # the restored model must reproduce the original predictions
        logprob2, hidden2 = restored.predict(sequences)
        assert logprob == logprob2
maleficarium/youtube-dl | youtube_dl/extractor/glide.py | Python | unlicense | 1,988 | 0.002012 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import unified_strdate
class GlideIE(InfoExtractor):
    IE_DESC = 'Glide mobile video messages (glide.me)'
    _VALID_URL = r'https?://share\.glide\.me/(?P<id>[A-Za-z0-9\-=_+]+)'
    _TEST = {
        'url': 'http://share.glide.me/UZF8zlmuQbe4mr+7dCiQ0w==',
        'md5': '4466372687352851af2d131cfaa8a4c7',
        'info_dict': {
            'id': 'UZF8zlmuQbe4mr+7dCiQ0w==',
            'ext': 'mp4',
            'title': 'Damon Timm\'s Glide message',
            'thumbnail': 're:^https?://.*?\.cloudfront\.net/.*\.jpg$',
            'uploader': 'Damon Timm',
            'upload_date': '20140919',
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)

        title = self._html_search_regex(
            r'<title>(.+?)</title>', webpage, 'title')
        # prefer the explicit <source> tag; fall back to OpenGraph metadata
        video_url = self._proto_relative_url(self._search_regex(
            r'<source[^>]+src=(["\'])(?P<url>.+?)\1',
            webpage, 'video URL', default=None,
            group='url')) or self._og_search_video_url(webpage)
        thumbnail = self._proto_relative_url(self._search_regex(
            r'<img[^>]+id=["\']video-thumbnail["\'][^>]+src=(["\'])(?P<url>.+?)\1',
            webpage, 'thumbnail url', default=None,
            group='url')) or self._og_search_thumbnail(webpage)
        uploader = self._search_regex(
            r'<div[^>]+class=["\']info-name["\'][^>]*>([^<]+)',
            webpage, 'uploader', fatal=False)
        upload_date = unified_strdate(self._search_regex(
            r'<div[^>]+class="info-date"[^>]*>([^<]+)',
            webpage, 'upload date', fatal=False))

        return {
            'id': video_id,
            'title': title,
            'url': video_url,
            'thumbnail': thumbnail,
            'uploader': uploader,
            'upload_date': upload_date,
        }
|
quantumlib/Cirq | cirq-core/cirq/protocols/inverse_protocol_test.py | Python | apache-2.0 | 2,069 | 0 | # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import cirq
class NoMethod:
    """Fixture with no ``__pow__``; inverting it must fail."""
class ReturnsNotImplemented:
    """Fixture whose ``__pow__`` always opts out via ``NotImplemented``."""

    def __pow__(self, power):
        return NotImplemented
class ReturnsFive:
    """Fixture whose ``__pow__`` ignores the exponent and yields 5."""

    def __pow__(self, power) -> int:
        return 5
class SelfInverse:
    """Fixture that is its own result under any exponentiation."""

    def __pow__(self, power) -> 'SelfInverse':
        return self
class ImplementsReversible:
    """Fixture invertible only for exponent -1 (where it yields 6)."""

    def __pow__(self, power):
        if power == -1:
            return 6
        return NotImplemented
class IsIterable:
    """Fixture that iterates over the fixed sequence 1, 2."""

    def __iter__(self):
        for value in (1, 2):
            yield value
@pytest.mark.parametrize(
    'val',
    (
        NoMethod(),
        'text',
        object(),
        ReturnsNotImplemented(),
        [NoMethod(), 5],
    ),
)
def test_objects_with_no_inverse(val):
    """Non-invertible values raise, and the default is returned unchanged."""
    with pytest.raises(TypeError, match="isn't invertible"):
        _ = cirq.inverse(val)
    assert cirq.inverse(val, None) is None
    assert cirq.inverse(val, NotImplemented) is NotImplemented
    assert cirq.inverse(val, 5) == 5
@pytest.mark.parametrize(
    'val,inv',
    (
        (ReturnsFive(), 5),
        (ImplementsReversible(), 6),
        (SelfInverse(),) * 2,
        (1, 1),
        (2, 0.5),
        (1j, -1j),
        ((), ()),
        ([], ()),
        ((2,), (0.5,)),
        ((1, 2), (0.5, 1)),
        ((2, (4, 8)), ((0.125, 0.25), 0.5)),
        ((2, [4, 8]), ((0.125, 0.25), 0.5)),
        (IsIterable(), (0.5, 1)),
    ),
)
def test_objects_with_inverse(val, inv):
    """Invertible values produce their inverse whether or not a default is given."""
    assert cirq.inverse(val) == inv
    assert cirq.inverse(val, 0) == inv
|
mohawkhq/mohawk-data-platform | data/urls.py | Python | bsd-3-clause | 479 | 0.010438 | from django.conf.urls im | port url, patterns |
from data import views
urlpatterns = patterns("data.views",
url(r"^$", views.IndexView.as_view()),
url(r"^a/(?P<application_external_id>[^/]{,255})\.json$", views.ApplicationInstanceListView.as_view()),
url(r"^(?P<model_external_id>[^/]{,255})\.json$", views.InstanceListView.as_view()),
url(r"^(?P<model_external_id>[^/]{,255})/(?P<instance_external_id>[^/]{,255})\.json", views.InstanceDetailView.as_view()),
) |
hyphyphyph/lascaux | crepehat/kitchen.py | Python | mit | 1,587 | 0.00063 | import os.path
from crepehat import SObject
class Kitchen(SObject):
    """Resolves a relative path against an ordered list of source directories,
    optionally trying a list of file extensions."""

    # Class-level defaults; __init__ replaces them with per-instance values.
    sources = []
    extensions = []

    def __init__(self, sources, extensions=None):
        """Store source directories (and optional extensions), coercing
        single values into one-element lists."""
        if not hasattr(sources, "__iter__"):
            sources = [sources]
        self.sources = sources
        if extensions and not hasattr(extensions, "__iter__"):
            extensions = [extensions]
        self.extensions = extensions

    def get(self, path, extensions=None, overridable=True):
        """Return the first existing file for ``path``, or False.

        When ``overridable`` is False only the last source is consulted.
        Pass ``extensions=False`` to disable extension handling entirely.
        """
        # `!= False` distinguishes an explicit opt-out from None/empty
        if self.extensions and extensions != False or extensions:
            path = os.path.splitext(path)[0]
            if not hasattr(extensions, "__iter__"):
                extensions = self.extensions
        if not overridable:
            if extensions:
                # NOTE(review): uses self.extensions[-1] rather than the local
                # `extensions` list -- looks intentional but worth confirming.
                if os.path.isfile(os.path.join(self.sources[-1],
                                               path+self.extensions[-1])):
                    return os.path.join(self.sources[-1],
                                        path+self.extensions[-1])
            else:
                if os.path.isfile(os.path.join(self.sources[-1],
                                               path)):
                    return os.path.join(self.sources[-1],
                                        path)
            return False
        for source in self.sources:
            j = os.path.join(source, path)
            if extensions:
                for ext in extensions:
                    if os.path.isfile(j+ext):
                        return j+ext
            else:
                if os.path.isfile(j):
                    return j
        return False
|
ddsc/ddsc-api | ddsc_api/models.py | Python | mit | 216 | 0 | # (c) Nelen & Schuurmans. MI | T licensed, see LICENSE.rst.
from __future__ import unicode_literals
# from django.db import models
# from django.utils.translation import ugettext_lazy as _
# Create your models here.
|
Ecogenomics/GTDBNCBI | scripts_dev/type_genome_selection/generate_date_table.py | Python | gpl-3.0 | 10,985 | 0.006372 | #!/usr/bin/env python3
###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
__prog_name__ = 'generate_date_table.py'
__prog_desc__ = 'Generate table with LPSN year or priority for species and subspecies names.'
__author__ = 'Pierre Chaumeil'
__copyright__ = 'Copyright 2018'
__credits__ = ['Pierre Chaumeil']
__license__ = 'GPL3'
__version__ = '0.0.2'
__maintainer__ = 'Pierre Chaumeil'
__email__ = 'uqpchaum@uq.edu.au'
__status__ = 'Development'
import os
import sys
import csv
import argparse
import re
import datetime
import logging
from biolib.logger import logger_setup
class DateEditor(object):
"""Main class
"""
    def __init__(self):
        """Initialize the editor with the shared 'timestamp' logger."""
        self.logger = logging.getLogger('timestamp')
    def parse_lpsn_scraped_priorities(self, lpsn_scraped_species_info):
        """Parse year of priority from references scraped from LPSN.

        Reads a tab-separated file whose first column is the species name
        ('s__<name>') and third column is the species authority string, and
        returns a dict mapping species name (without the 's__' prefix) to
        its year of priority.
        """
        priorities = {}
        with open(lpsn_scraped_species_info) as lsi:
            lsi.readline()  # skip header line
            for line in lsi:
                infos = line.rstrip('\n').split('\t')

                sp = infos[0]
                if sp == 's__':
                    # *** hack to skip bad case in file
                    # Pierre to fix
                    continue

                species_authority = infos[2]
                reference_str = species_authority.split(', ')[0]
                references = reference_str.replace('(', '').replace(')', '')
                # strip emended ('emend.') and revived ('ex') references so
                # only the original naming reference contributes a year
                years = re.sub(r'emend\.[^\d]*\d{4}', '', references)
                years = re.sub(r'ex [^\d]*\d{4}', ' ', years)
                years = re.findall('[1-3][0-9]{3}', years, re.DOTALL)
                years = [int(y) for y in years if int(y) <= datetime.datetime.now().year]

                if len(years) == 0:
                    # assume this name is validated through ICN and just take the first
                    # date given as the year of priority
                    years = re.findall('[1-3][0-9]{3}', references, re.DOTALL)
                    years = [int(y) for y in years if int(y) <= datetime.datetime.now().year]

                priorities[sp.replace('s__', '')] = years[0]

        # We make sure that species and subspecies type species have the same date
        # ie Photorhabdus luminescens and Photorhabdus luminescens subsp.
        # Luminescens
        for k, v in priorities.items():
            infos_name = k.split(' ')
            if len(infos_name) == 2 and '{0} {1} subsp. {1}'.format(infos_name[0], infos_name[1]) in priorities:
                priorities[k] = min(int(v), int(priorities.get(
                    '{0} {1} subsp. {1}'.format(infos_name[0], infos_name[1]))))
            elif len(infos_name) == 4 and infos_name[1] == infos_name[3] and '{} {}'.format(infos_name[0], infos_name[1]) in priorities:
                priorities[k] = min(int(v), int(priorities.get(
                    '{} {}'.format(infos_name[0], infos_name[1]))))

        return priorities
def parse_lpsn_gss_priorities(self, lpsn_gss_file):
"""Get priority of species and usbspecies from LPSN GSS file."""
priorities = {}
illegitimate_names = set()
with open(lpsn_gss_file, encoding='utf-8', errors='ignore') as f:
csv_reader = csv.reader(f)
for line_num, tokens in enumerate(csv_reader):
if line_num == 0:
genus_idx = tokens.index('genus_name')
specific_idx = tokens.index('sp_epithet')
subsp_idx = tokens.index('subsp_epithet')
status_idx = tokens.index('status')
author_idx = tokens.index('authors')
else:
generic = tokens[genus_idx].strip().replace('"', '')
specific = tokens[specific_idx].strip().replace('"', '')
subsp = tokens[subsp_idx].strip().replace('"', '')
if subsp:
taxon = '{} {} subsp. {}'.format(generic, specific, subsp)
elif specific:
taxon = '{} {}'.format(generic, specific)
else:
# skip genus entries
continue
status = tokens[status_idx].strip().replace('"', '')
status_tokens = [t.strip() for t in status.split(';')]
status_tokens = [tt.strip() for t in status_tokens for tt in t.split(',') ]
if 'illegitimate name' in status_tokens:
illegitimate_names.add(taxon)
if taxon in priorities:
continue
# get priority references, ignoring references if they are
# marked as being a revied name as indicated by a 'ex' or 'emend'
# (e.g. Holospora (ex Hafkine 1890) Gromov and Ossipov 1981)
ref_str = tokens[author_idx]
references = ref_str.replace('(', '').replace(')', '')
years = re.sub(r'emend\.[^\d]*\d{4}', '', references)
years = re.sub(r'ex [^\d]*\d{4}', ' ', years)
years = re.findall('[1-3][0-9]{3}', years, re.DOTALL)
years = [int(y) for y in years if int(y) <= datetime.datetime.now().year]
if (taxon not in illegitimate_names
and taxon in priorities
and years[0] != priorities[taxon]):
# conflict that can't be attributed to one of the entries being
# considered an illegitimate name
self.logger.error('Conflicting priority references for {}: {} {}'.format(
taxon, years, priorities[taxon]))
priorities[taxon] = years[0]
return priorities
def run(self, lpsn_scraped_species_info, lpsn_gss_file, out_dir):
"""Parse priority year from LPSN data."""
self.logger.info('Reading priority references scrapped from LPSN.')
scraped_sp_priority = self.parse_lpsn_scraped_priorities(lpsn_scraped_species_info)
self.lo | gger.info(' - read priority for {:,} species.'.format(len(scraped_sp_priority)))
self.logger.info('Reading priority references from LPSN GSS file.')
gss_sp_priority = self.parse_lpsn_gss_priorities(lpsn_gss_file)
self.logger.info(' - read priority for {:,} species.' | .format(len(gss_sp_priority)))
self.logger.info('Scrapped pr |
hamzehd/edx-platform | openedx/core/djangoapps/content/course_overviews/tests.py | Python | agpl-3.0 | 21,034 | 0.003803 | """
Tests for course_overviews app.
"""
import datetime
import ddt
import itertools
import math
import mock
import pytz
from django.utils import timezone
from lms.djangoapps.certificates.api import get_active_web_certificate
from lms.djangoapps.courseware.courses import course_image_url
from xmodule.course_metadata_utils import DEFAULT_START_DATE
from xmodule.error_module import ErrorDescriptor
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, check_mongo_calls, check_mongo_calls_range
from .models import CourseOverview
@ddt.ddt
class CourseOverviewTestCase(ModuleStoreTestCase):
"""
Tests for CourseOverviewDescriptor model.
"""
TODAY = timezone.now()
LAST_MONTH = TODAY - datetime.timedelta(days=30)
LAST_WEEK = TODAY - datetime.timedelta(days=7)
NEXT_WEEK = TODAY + datetime.timedelta(days=7)
NEXT_MONTH = TODAY + datetime.timedelta(days=30)
COURSE_OVERVIEW_TABS = {'courseware', 'info', 'textbooks', 'discussion', 'wiki', 'progress'}
def check_course_overview_against_course(self, course):
"""
Compares a CourseOverview object against its corresponding
CourseDescriptor object.
Specifically, given a course, test that data within the following three
objects match each other:
- the CourseDescriptor itself
- a CourseOverview that was newly constructed from _create_from_course
- a CourseOverview that was loaded from the MySQL database
Arguments:
course (CourseDescriptor): the course to be checked.
"""
def get_seconds_since_epoch(date_time):
"""
Returns the number of seconds between the Unix Epoch and the given
datetime. If the given datetime is None, return None.
Arguments:
date_time (datetime): the datetime in question.
"""
if date_time is None:
return None
epoch = datetime.datetime.utcfromtimestamp(0).replace(tzinfo=pytz.utc)
return math.floor((date_time - epoch).total_seconds())
# Load the CourseOverview from the cache twice. The first load will be a cache miss (because the cache
# is empty) so the course will be newly created with CourseOverviewDescriptor.create_from_course. The second
# load will be a cache hit, so the course will be loaded from the cache.
course_overview_cache_miss = CourseOverview.get_from_id(course.id)
course_overview_cache_hit = CourseOverview.get_from_id(course.id)
# Test if value of these attributes match between the three objects
fields_to_test = [
'id',
'display_name',
'display_number_with_default',
'display_org_with_default',
'advertised_start',
'facebook_url',
'social_sharing_url',
'certificates_display_behavior',
'certificates_show_before_end',
'cert_name_short',
'cert_name_long',
'lowest_passing_grade',
'end_of_course_survey_url',
'mobile_available',
'visible_to_staff_only',
'location',
'number',
'url_name',
'display_name_with_default',
'start_date_is_still_default',
'pre_requisite_courses',
'enrollment_domain',
'invitation_only',
'max_student_enrollments_allowed',
]
for attribute_name in fields_to_test:
course_value = getattr(course, attribute_name)
cache_miss_value = getattr(course_overview_cache_miss, attribute_name)
cache_hit_value = getattr(course_overview_cache_hit, attribute_name)
self.assertEqual(course_value, cache_miss_value)
self.assertEqual(cache_miss_value, cache_hit_value)
# Test if return values for all methods are equal between the three objects
methods_to_test = [
('clean_id', ()),
('clean_id', ('#',)),
('has_ended', ()),
('has_started', ()),
('start_datetime_text', ('SHORT_DATE',)),
('start_datetime_text', ('DATE_TIME',)),
('end_datetime_text', ('SHORT_DATE',)),
('end_datetime_text', ('DATE_TIME',)),
('may_certify', ()),
]
for method_name, method_args in methods_to_test:
course_value = getattr(course, method_name)(*method_args)
cache_miss_value = getattr(course_overview_cache_miss, method_name)(*method_args)
cache_hit_value = getattr(course_overview_cache_hit, method_name)(*method_args)
self.assertEqual(course_value, cache_miss_value)
self.assertEqual(cache_miss_value, cache_hit_value)
# Other values to test
# Note: we test the start and end attributes here instead of in
# fields_to_test, because I ran into trouble while testing datetimes
# for equality. When writing and reading dates from databases, the
# resulting values are often off by fractions of a second. So, as a
# workaround, we simply test if the start and end times are the same
# number of seconds from the Unix epoch.
others_to_test = [
(
course_image_url(course),
course_overview_cache_miss.course_image_url,
course_overview_cache_hit.course_image_url
),
(
get_active_web_certificate(course) is not None,
course_overview_cache_miss.has_any_active_web_certificate,
course_overview_cache_hit.has_any_active_web_certificate
),
(
get_seconds_since_epoch(course.start),
get_seconds_since_epoch(course_overview_cache_miss.start),
get_seconds_since_epoch(course_overview_cache_hit.start),
),
(
get_seconds_since_epoch(course.end),
get_seconds_since_epoch(course_overview_cache_miss.end),
get_seconds_since_epoch(course_overview_cache_hit.end),
),
(
get_seconds_since_epoch(course.enrollment_start),
get_seconds_since_epoch(course_overview_cache_miss.enrollment_start),
get_seconds_since_epoch(course_overview_cache_hit.enrollment_start),
),
(
get_seconds_since_epoch(course.enrollment_end),
get_seconds | _since_epoch(course_overview_cache_miss.enrollment_end),
get_seconds_since_epoch(course_overview_cache_hit.enrollment_end),
),
]
for (course_value, cache_miss_value, cache_hit_value) in others_to_test:
self.assertEqual(course_value, cache_miss_value)
self.assertEqual(cache_miss_value, cache_hit_value)
# test tabs for both cached miss a | nd cached hit courses
for course_overview in [course_overview_cache_miss, course_overview_cache_hit]:
course_overview_tabs = course_overview.tabs.all()
course_resp_tabs = {tab.tab_id for tab in course_overview_tabs}
self.assertEqual(self.COURSE_OVERVIEW_TABS, course_resp_tabs)
@ddt.data(*itertools.product(
[
{
"display_name": "Test Course", # Display name provided
"start": LAST_WEEK, # In the middle of the course
"end": NEXT_WEEK,
"advertised_start": "2015-01-01 11:22:33", # Parse-able advertised_start
"pre_requisite_courses": [ # Has pre-requisites
'course-v1://edX+test1+run1',
'course-v1://edX+test2+run1'
],
"static_asset_path": "/my/abs/path", # Absolute path
"certificates_show_before_end": True,
},
|
egid/stratux | test/screen/screen.py | Python | bsd-3-clause | 3,003 | 0.005994 | #!/usr/bin/env python
from oled.device import ssd1306, sh1106
from oled.render import canvas
from PIL import ImageDraw, ImageFont, Image
import urllib2
import json
import time
font2 = ImageFont.truetype('/root/stratux/test/screen/CnC_Red_Alert.ttf', 12)
oled = ssd1306(port=1, address=0x3C)
with canvas(oled) as draw:
logo = Image.open('/root/stratux/test/screen/logo.bmp')
draw.bitmap((32, 0), logo, fill=1)
time.sleep(10)
n = 0
while 1:
time.sleep(1)
response = urllib2.urlopen('http://localhost/getStat | us')
getStatusHTML = response.read()
getStatusData = json.loads(getStatusH | TML)
CPUTemp = getStatusData["CPUTemp"]
uat_current = getStatusData["UAT_messages_last_minute"]
uat_max = getStatusData["UAT_messages_max"]
es_current = getStatusData["ES_messages_last_minute"]
es_max = getStatusData["ES_messages_max"]
response = urllib2.urlopen('http://localhost/getTowers')
getTowersHTML = response.read()
getTowersData = json.loads(getTowersHTML)
NumTowers = len(getTowersData)
with canvas(oled) as draw:
pad = 2 # Two pixels on the left and right.
text_margin = 25
# UAT status.
draw.text((50, 0), "UAT", font=font2, fill=255)
# "Status bar", 2 pixels high.
status_bar_width_max = oled.width - (2 * pad) - (2 * text_margin)
status_bar_width = 0
if uat_max > 0:
status_bar_width = int((float(uat_current) / uat_max) * status_bar_width_max)
draw.rectangle((pad + text_margin, 14, pad + text_margin + status_bar_width, 20), outline=255, fill=255) # Top left, bottom right.
# Draw the current (left) and max (right) numbers.
draw.text((pad, 14), str(uat_current), font=font2, fill=255)
draw.text(((2*pad) + text_margin + status_bar_width_max, 14), str(uat_max), font=font2, fill=255)
# ES status.
draw.text((44, 24), "1090ES", font=font2, fill=255)
status_bar_width = 0
if es_max > 0:
status_bar_width = int((float(es_current) / es_max) * status_bar_width_max)
draw.rectangle((pad + text_margin, 34, pad + text_margin + status_bar_width, 40), outline=255, fill=255) # Top left, bottom right.
# Draw the current (left) and max (right) numbers.
draw.text((pad, 34), str(es_current), font=font2, fill=255)
draw.text(((2*pad) + text_margin + status_bar_width_max, 34), str(es_max), font=font2, fill=255)
# Other stats.
seq = (n / 5) % 2
t = ""
if seq == 0:
t = "CPU: %0.1fC, Towers: %d" % (CPUTemp, NumTowers)
if seq == 1:
t = "GPS Sat: %d/%d/%d" % (getStatusData["GPS_satellites_locked"], getStatusData["GPS_satellites_seen"], getStatusData["GPS_satellites_tracked"])
if getStatusData["GPS_solution"] == "GPS + SBAS (WAAS / EGNOS)":
t = t + " (WAAS)"
print t
draw.text((pad, 45), t, font=font2, fill=255)
n = n+1 |
alanfranz/mock | py/mockchain.py | Python | gpl-2.0 | 12,065 | 0.005305 | #!/usr/bin/python -tt
# by skvidal@fedoraproject.org
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
# copyright 2012 Red Hat, Inc.
# SUMMARY
# mockchain
# take a mock config and a series of srpms
# rebuild them one at a time
# adding each to a local repo
# so they are available as build deps to next pkg being built
import sys
import subprocess
import os
import optparse
import tempfile
import shutil
from urlgrabber import grabber
import time
import mockbuild.util
# all of the variables below are substituted by the build system
__VERSION__ = "unreleased_version"
SYSCONFDIR = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), "..", "etc")
PYTHONDIR = os.path.dirname(os.path.realpath(sys.argv[0]))
PKGPYTHONDIR = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), "mockbuild")
MOCKCONFDIR = os.path.join(SYSCONFDIR, "mock")
# end build system subs
mockconfig_path='/etc/mock'
def createrepo(path):
    """Create or refresh the yum repodata for *path*.

    Runs ``createrepo --update`` when repodata already exists (cheaper),
    otherwise a full ``createrepo``.  Returns the subprocess's
    ``(stdout, stderr)`` pair.
    """
    repomd = path + '/repodata/repomd.xml'
    if os.path.exists(repomd):
        command = ['/usr/bin/createrepo', '--update', path]
    else:
        command = ['/usr/bin/createrepo', path]
    proc = subprocess.Popen(command,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    return proc.communicate()
def parse_args(args):
    """Parse mockchain's command line into (opts, args).

    ``args`` is returned exactly as optparse leaves it, so callers are
    expected to skip the leading program name when pulling out the SRPM
    arguments.  Exits the process when mandatory values are missing.
    """
    parser = optparse.OptionParser('\nmockchain -r mockcfg pkg1 [pkg2] [pkg3]')
    parser.add_option('-r', '--root', default=None, dest='chroot',
                      help="chroot config name/base to use in the mock build")
    parser.add_option('-l', '--localrepo', default=None,
                      help="local path for the local repo, defaults to making its own")
    parser.add_option('-c', '--continue', default=False, action='store_true',
                      dest='cont',
                      help="if a pkg fails to build, continue to the next one")
    parser.add_option('-a', '--addrepo', default=[], action='append',
                      dest='repos',
                      help="add these repo baseurls to the chroot's yum config")
    parser.add_option('--recurse', default=False, action='store_true',
                      help="if more than one pkg and it fails to build, try to build the rest and come back to it")
    parser.add_option('--log', default=None, dest='logfile',
                      help="log to the file named by this option, defaults to not logging")
    parser.add_option('--tmp_prefix', default=None, dest='tmp_prefix',
                      help="tmp dir prefix - will default to username-pid if not specified")
    #FIXME?
    # figure out how to pass other args to mock?
    opts, args = parser.parse_args(args)
    # --recurse only makes sense when a failure does not abort the run.
    if opts.recurse:
        opts.cont = True
    if not opts.chroot:
        print "You must provide an argument to -r for the mock chroot"
        sys.exit(1)
    # NOTE(review): this checks sys.argv rather than the *args* parameter
    # that was passed in -- confirm callers always pass sys.argv here.
    if len(sys.argv) < 3:
        print "You must specifiy at least 1 package to build"
        sys.exit(1)
    return opts, args
def add_local_repo(infile, destfile, baseurl, repoid=None):
    """Take a mock chroot config and add a repo to its yum.conf.

    infile = mock chroot config file
    destfile = where to save out the result
    baseurl = baseurl of repo you wish to add
    repoid = section id for the new repo; derived from the baseurl
             host/path when not given

    Returns (True, '') on success, or (False, errmsg) when the result
    could not be written.  (The unreachable duplicate ``return True, ''``
    that followed the except clause has been removed.)
    """
    global config_opts
    try:
        # The mock config is executable python that populates config_opts.
        # NOTE(review): execfile trusts the config file completely; only
        # run this on configs you control.
        execfile(infile)
        if not repoid:
            repoid = baseurl.split('//')[1].replace('/', '_')
        localyumrepo = """
[%s]
name=%s
baseurl=%s
enabled=1
skip_if_unavailable=1
metadata_expire=30
cost=1
""" % (repoid, baseurl, baseurl)
        config_opts['yum.conf'] += localyumrepo
        br_dest = open(destfile, 'w')
        for k, v in config_opts.items():
            br_dest.write("config_opts[%r] = %r\n" % (k, v))
        br_dest.close()
        return True, ''
    except (IOError, OSError):
        return False, "Could not write mock config to %s" % destfile
def do_build(opts, cfg, pkg):
    # Return convention:
    #   0, cmd, out, err    = build failure
    #   1, cmd, out, err    = build success
    #   2, None, None, None = already built on a previous run (skipped)
    # Per-package result dir, named after the srpm minus its suffix.
    s_pkg = os.path.basename(pkg)
    pdn = s_pkg.replace('.src.rpm', '')
    resdir = '%s/%s' % (opts.local_repo_dir, pdn)
    resdir = os.path.normpath(resdir)
    if not os.path.exists(resdir):
        os.makedirs(resdir)
    # Marker files record the outcome of previous runs so re-runs can
    # skip finished packages.
    success_file = resdir + '/success'
    fail_file = resdir + '/fail'
    if os.path.exists(success_file):
        return 2, None, None, None
    # clean it up if we're starting over :)
    if os.path.exists(fail_file):
        os.unlink(fail_file)
    # Build the mock invocation; --uniqueext keeps concurrent mockchain
    # runs from sharing a chroot.
    mockcmd = ['/usr/bin/mock',
               '--configdir', opts.config_path,
               '--resultdir', resdir,
               '--uniqueext', opts.uniqueext,
               '-r', cfg, ]
    print 'building %s' % s_pkg
    mockcmd.append(pkg)
    cmd = subprocess.Popen(mockcmd,
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE)
    out, err = cmd.communicate()
    if cmd.returncode == 0:
        open(success_file, 'w').write('done\n')
        ret = 1
    else:
        open(fail_file, 'w').write('undone\n')
        ret = 0
    return ret, cmd, out, err
def log(lf, msg):
    """Append *msg* to logfile *lf* (when given) and echo it to stdout.

    Logfile lines are prefixed with the current unix timestamp.  Failure
    to write the logfile is reported on stdout but is never fatal.
    """
    if lf:
        stamp = time.time()
        try:
            with open(lf, 'a') as fileobj:
                fileobj.write(str(stamp) + ':' + msg + '\n')
        except (IOError, OSError) as e:
            print('Could not write to logfile %s - %s' % (lf, str(e)))
    print(msg)
config_opts = {}
def main(args):
global config_opts
config_opts = mockbuild.util.setup_default_config_opts(os.getgid(), __VERSION__, PKGPYTHONDIR)
opts, args = parse_args(args)
# take mock config + list of pkgs
cfg=opts.chroot
pkgs=args[1:]
mockcfg = mockconfig_path + '/' + cfg + '.cfg'
if not os.path.exists(mockcfg):
print "could not find config: %s" % mockcfg
sys.exit(1)
if not opts.tmp_prefix:
try:
opts.tmp_prefix = os.getlogin()
except OSError, e:
print "Could not find login name for tmp dir prefix add --tmp_prefix"
sys.exit(1)
pid = os.getpid()
opts.uniqueext = '%s-%s' % (opts.tmp_prefix, pid)
# create a tempdir for our local info
if opts.localrepo:
local_tmp_dir = os.path.abspath(opts.localrepo)
if not os.path.exists(local_tmp_dir):
os.makedirs(local_tmp_dir)
else:
pre = 'mock-chain-%s-' % opts.uniqueext
local_tmp_dir = tempfile.mkdtemp(prefix=pre, dir='/var/tmp')
os.chmod(local_tmp_dir, 0755)
if opts.logfile:
| opts.logfile = os.path.join(local_tmp_dir, opts.logfile)
if os.path.exists(opts.logfile):
os.unlink(opts.logfile)
| log(opts.logfile, "starting logfile: %s" % opts.logfile)
opts.local_repo_dir = os.path.normpath(local_tmp_dir + '/results/' + cfg + '/')
if not os.path.exists(opts.local_repo_dir):
os.makedirs(opts.local_repo_dir, mode=0755)
local_baseurl="file://%s" % opts.local_repo_dir
log(opts.logfile, "results dir: %s" % opts.local_repo_dir)
opts.config_path = os.path.normpath(local_tmp_dir + '/configs/' + cfg + '/')
if not os.path.exists(opts.config_path):
os.makedirs(opts.config_path, mode=0755)
log(opts.logfile, "config dir: %s" % opts.config_path)
my_mock_config = opts.config_path + '/' + os.path.basename(mockcfg)
# modify with localrepo
res, msg = add_local_repo(mockcfg, my_mock_config, local_baseurl, 'local_build_repo')
if not res:
log(opts.logfile, "Error: Could not write out local config: %s" % msg)
sys.exit(1)
for baseurl in opts.repos:
res, msg = add_local_repo(my_mock_config, my_mock_config, baseurl)
if not res:
|
jmatthed/avatar-python | avatar/__init__.py | Python | apache-2.0 | 90 | 0.033333 |
#from configuration.manager import ConfigurationManager
#config = ConfigurationManager()
837468220/python-for-android | python3-alpha/python3-src/Tools/freeze/checkextensions_win32.py | Python | apache-2.0 | 6,199 | 0.00613 | """Extension management for Windows.
Under Windows it is unlikely the .obj files are of use, as special compiler options
are needed (primarily to toggle the behavior of "public" symbols.
I dont consider it worth parsing the MSVC makefiles for compiler options. Even if
we get it just right, a specific freeze application may have specific compiler
options anyway (eg, to enable or disable specific functionality)
So my basic strategy is:
* Have some Windows INI files which "describe" one or more extension modules.
(Freeze comes with a default one for all known modules - but you can specify
your own).
* This description can include:
- The MSVC .dsp file for the extension. The .c source file names
are extraced from there.
- Specific compiler/linker options
- Flag to indicate if Unicode compilation is expected.
At the moment the name and location of this INI file is hardcoded,
but an obvious enhancement would be to provide command line options.
"""
import os, sys
try:
import win32api
except ImportError:
win32api = None # User has already been warned
class CExtension:
    """An abstraction of an extension implemented in C/C++.

    Collects everything freeze needs to know to build one extension
    module: its source files, any extra compiler options, and the .lib
    files the final .EXE must link against.
    """

    def __init__(self, name, sourceFiles):
        self.name = name
        # The .c/.cpp files making up this extension.
        self.sourceFiles = sourceFiles
        # Extra compiler options applied to every source module here.
        self.compilerOptions = []
        # .lib files the final .EXE will need.
        self.linkerLibs = []

    def GetSourceFiles(self):
        return self.sourceFiles

    def AddCompilerOption(self, option):
        self.compilerOptions.append(option)

    def GetCompilerOptions(self):
        return self.compilerOptions

    def AddLinkerLib(self, lib):
        self.linkerLibs.append(lib)

    def GetLinkerLibs(self):
        return self.linkerLibs
def checkextensions(unknown, extra_inis, prefix):
    """Resolve each module name in *unknown* against the extension maps.

    Builds the table of frozen extensions.  Every name is looked up in
    the user-supplied INI files first, then in the default map shipped
    next to this script; modules that cannot be resolved are reported on
    stderr and skipped.  Returns a list of CExtension objects.

    (Repaired dataset-split corruption that had broken two lines of this
    function with embedded ' | ' markers.)
    """
    defaultMapName = os.path.join(os.path.split(sys.argv[0])[0], "extensions_win32.ini")
    if not os.path.isfile(defaultMapName):
        sys.stderr.write("WARNING: %s can not be found - standard extensions may not be found\n" % defaultMapName)
    else:
        # must go on end, so other inis can override.
        extra_inis.append(defaultMapName)
    ret = []
    for mod in unknown:
        for ini in extra_inis:
            defn = get_extension_defn(mod, ini, prefix)
            if defn is not None:
                ret.append(defn)
                break
        else:  # no map file had a definition for this module
            sys.stderr.write("No definition of module %s in any specified map file.\n" % (mod))
    return ret
def get_extension_defn(moduleName, mapFileName, prefix):
    """Look up *moduleName* in the map file *mapFileName*.

    Returns a populated CExtension, or None when win32api is unavailable,
    the module has no entry in the map file, or its .dsp cannot be read.
    """
    if win32api is None:
        return None
    os.environ['PYTHONPREFIX'] = prefix
    dsp = win32api.GetProfileVal(moduleName, "dsp", "", mapFileName)
    if dsp == "":
        return None
    # We allow environment variables in the file name.
    dsp = win32api.ExpandEnvironmentStrings(dsp)
    # If the path to the .DSP file is not absolute, assume it is relative
    # to the description file.
    if not os.path.isabs(dsp):
        dsp = os.path.join(os.path.split(mapFileName)[0], dsp)
    # Parse it to extract the source files.
    sourceFiles = parse_dsp(dsp)
    if sourceFiles is None:
        return None
    module = CExtension(moduleName, sourceFiles)
    # Put the path to the DSP into the environment so entries can reference it.
    os.environ['dsp_path'] = os.path.split(dsp)[0]
    os.environ['ini_path'] = os.path.split(mapFileName)[0]
    cl_options = win32api.GetProfileVal(moduleName, "cl", "", mapFileName)
    if cl_options:
        module.AddCompilerOption(win32api.ExpandEnvironmentStrings(cl_options))
    exclude = win32api.GetProfileVal(moduleName, "exclude", "", mapFileName)
    exclude = exclude.split()
    if win32api.GetProfileVal(moduleName, "Unicode", 0, mapFileName):
        module.AddCompilerOption('/D UNICODE /D _UNICODE')
    libs = win32api.GetProfileVal(moduleName, "libs", "", mapFileName).split()
    for lib in libs:
        module.AddLinkerLib(win32api.ExpandEnvironmentStrings(lib))
    for exc in exclude:
        if exc in module.sourceFiles:
            # BUGFIX: was "modules.sourceFiles.remove(exc)" -- a NameError
            # whenever an exclude entry actually matched.
            module.sourceFiles.remove(exc)
    return module
# Given an MSVC DSP file, locate C source files it uses
# returns a list of source files.
def parse_dsp(dsp):
    """Extract the C/C++ source files referenced by an MSVC .dsp project.

    Returns a list of absolute source-file paths (via
    win32api.GetFullPathName), or None when the .dsp file cannot be read.
    """
    ret = []
    dsp_path, dsp_name = os.path.split(dsp)
    try:
        # Close the handle deterministically -- the original leaked it
        # until garbage collection.
        with open(dsp, "r") as f:
            lines = f.readlines()
    except IOError as msg:
        sys.stderr.write("%s: %s\n" % (dsp, msg))
        return None
    for line in lines:
        fields = line.strip().split("=", 2)
        if fields[0] == "SOURCE":
            if os.path.splitext(fields[1])[1].lower() in ['.cpp', '.c']:
                ret.append(win32api.GetFullPathName(os.path.join(dsp_path, fields[1])))
    return ret
def write_extension_table(fname, modules):
    """Write the frozen-extension C source table for *modules* to *fname*.

    Each module contributes an extern declaration for its init function
    and an entry in the extensions[] inittab.  For a sub-module of a
    package only the last name component names the init function
    (initfoo for "package.foo").
    """
    # "with" replaces the original try/finally close.
    with open(fname, "w") as fp:
        fp.write(ext_src_header)
        # Write fn protos.
        for module in modules:
            # bit of a hack for .pyd's as part of packages.
            name = module.name.split('.')[-1]
            fp.write('extern void init%s(void);\n' % (name))
        # Write the table.
        fp.write(ext_tab_header)
        for module in modules:
            name = module.name.split('.')[-1]
            fp.write('\t{"%s", init%s},\n' % (name, name))
        fp.write(ext_tab_footer)
        fp.write(ext_src_footer)

ext_src_header = """\
#include "Python.h"
"""

ext_tab_header = """\
static struct _inittab extensions[] = {
"""

ext_tab_footer = """\
\t/* Sentinel */
\t{0, 0}
};
"""

ext_src_footer = """\
extern DL_IMPORT(int) PyImport_ExtendInittab(struct _inittab *newtab);
int PyInitFrozenExtensions()
{
\treturn PyImport_ExtendInittab(extensions);
}
"""
|
Kagami/kisa | lib/twisted/internet/_newtls.py | Python | cc0-1.0 | 8,938 | 0.002014 | # -*- test-case-name: twisted.test.test_ssl -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module implements memory BIO based TLS support. It is the preferred
implementation and will be used whenever pyOpenSSL 0.10 or newer is installed
(whenever L{twisted.protocols.tls} is importable).
@since: 11.1
"""
from zope.interface import implements
from zope.interface import directlyProvides
from twisted.internet.interfaces import ITLSTransport, ISSLTransport
from twisted.internet.abstract import FileDescriptor
from twisted.internet._ssl import _TLSDelayed
from twisted.protocols.tls import TLSMemoryBIOFactory, TLSMemoryBIOProtocol
class _BypassTLS(object):
"""
L{_BypassTLS} is used as the transport object for the TLS protocol object
used to implement C{startTLS}. Its methods skip any TLS logic which
C{startTLS} enables.
@ivar _base: A transport class L{_BypassTLS} has been mixed in with to which
methods will be forwarded. This class is only responsible for sending
bytes over the connection, not doing TLS.
@ivar _connection: A L{Connection} which TLS has been started on which will
be proxied to by this object. Any method which has its behavior
altered after C{startTLS} will be skipped in favor of the base class's
implementation. This allows the TLS protocol object to have direct
access to the transport, necessary to actually implement TLS.
"""
def __init__(self, base, connection):
self._base = base
self._connection = connection
def __getattr__(self, name):
"""
Forward any extra attribute access to the original transport object.
For example, this exposes C{getHost}, the behavior of which does not
change after TLS is enabled.
"""
return getattr(self._connection, name)
def write(self, data):
"""
Write some bytes directly to the connection.
"""
return self._base.write(self._connection, data)
def writeSequence(self, iovec):
"""
Write a some bytes directly to the connection.
"""
return self._base.writeSequence(self._connection, iovec)
def loseConnection(self, *args, **kwargs):
"""
Close the underlying connection.
"""
return self._base.loseConnection(self._connection, *args, **kwargs)
def startTLS(transport, contextFactory, normal, bypass):
    """
    Add a layer of SSL to a transport.

    @param transport: the transport to wrap; it must expose
        C{_tlsClientDefault}, a settable C{TLS} flag, settable
        C{getHandle}/C{getPeerCertificate} attributes, and a C{protocol}
        attribute referring to the connected L{IProtocol}.
    @param contextFactory: an SSL context factory defining SSL parameters
        for the new SSL layer.
    @type contextFactory: L{twisted.internet.ssl.ContextFactory}
    @param normal: whether SSL runs in the same client/server direction
        as the underlying transport (C{True}) or inverted (C{False}).
    @type normal: L{bool}
    @param bypass: a transport base class whose methods bypass the new
        SSL layer (so the SSL layer itself can send its bytes).
    @type bypass: L{type}
    """
    # Decide which TLS role we play: with ``normal`` the SSL direction
    # matches the underlying transport's role, otherwise it is flipped.
    if normal:
        isClient = transport._tlsClientDefault
    else:
        isClient = not transport._tlsClientDefault

    tlsFactory = TLSMemoryBIOFactory(contextFactory, isClient, None)
    tlsProtocol = TLSMemoryBIOProtocol(tlsFactory, transport.protocol, False)
    transport.protocol = tlsProtocol

    # Expose TLS-specific introspection on the transport itself.
    transport.getHandle = tlsProtocol.getHandle
    transport.getPeerCertificate = tlsProtocol.getPeerCertificate

    # Mark the transport as secure.
    directlyProvides(transport, ISSLTransport)

    # Remember we did this so that write and writeSequence can send the
    # data to the right place.
    transport.TLS = True

    # Hook it up: the TLS protocol talks to a transport that bypasses the
    # TLS layer it implements.
    transport.protocol.makeConnection(_BypassTLS(bypass, transport))
class ConnectionMixin(object):
    """
    A mixin for L{twisted.internet.abstract.FileDescriptor} which adds an
    L{ITLSTransport} implementation.

    (Repaired dataset-split corruption: two lines of this class carried
    embedded ' | ' markers.)

    @ivar TLS: A flag indicating whether TLS is currently in use on this
        transport.  This is not a good way for applications to check for
        TLS, instead use L{ISSLTransport.providedBy}.

    @ivar _tlsWaiting: If TLS has been requested but the write buffer for
        non-TLS data still needs to be flushed, this is set to a
        L{_TLSDelayed} instance which will buffer data that must only be
        sent once TLS has been started.
    """
    implements(ITLSTransport)

    TLS = False
    _tlsWaiting = None

    def startTLS(self, ctx, normal=True):
        """
        @see: L{ITLSTransport.startTLS}
        """
        if self.dataBuffer or self._tempDataBuffer:
            # pre-TLS bytes are still being written.  Starting TLS now
            # would do the wrong thing.  Instead, mark that we're trying
            # to go into the TLS state; doWrite finishes the job once the
            # buffer drains.
            self._tlsWaiting = _TLSDelayed([], ctx, normal)
            return False
        startTLS(self, ctx, normal, FileDescriptor)

    def write(self, bytes):
        """
        Write some bytes to this connection, passing them through a TLS
        layer if necessary, or discarding them if the connection has
        already been lost.
        """
        if self.TLS:
            if self.connected:
                self.protocol.write(bytes)
        elif self._tlsWaiting is not None:
            # TLS is pending; queue the bytes until it has started.
            self._tlsWaiting.bufferedData.append(bytes)
        else:
            FileDescriptor.write(self, bytes)

    def writeSequence(self, iovec):
        """
        Write some bytes to this connection, scatter/gather-style,
        passing them through a TLS layer if necessary, or discarding them
        if the connection has already been lost.
        """
        if self.TLS:
            if self.connected:
                self.protocol.writeSequence(iovec)
        elif self._tlsWaiting is not None:
            self._tlsWaiting.bufferedData.extend(iovec)
        else:
            FileDescriptor.writeSequence(self, iovec)

    def loseConnection(self):
        """
        Close this connection after writing all pending data.

        If TLS has been negotiated, perform a TLS shutdown.
        """
        if self.TLS:
            if self.connected and not self.disconnecting:
                self.protocol.loseConnection()
        else:
            FileDescriptor.loseConnection(self)

    def doWrite(self):
        """
        Write out some data from the send buffer.

        If the buffer becomes empty and TLS has been requested but not
        yet enabled, enable it.
        """
        result = FileDescriptor.doWrite(self)
        if self._tlsWaiting is not None:
            if not self.dataBuffer and not self._tempDataBuffer:
                waiting = self._tlsWaiting
                self._tlsWaiting = None
                self.startTLS(waiting.context, waiting.extra)
                self.writeSequence(waiting.bufferedData)
        return result
class ClientMix |
deenjohn/TimelineJS | website/core/settings/loc.py | Python | mpl-2.0 | 279 | 0.014337 | """Local settings and globals."""
import sys
# Repaired dataset-split corruption: this import line was broken by an
# embedded ' | ' marker.
from os.path import normpath, join

from .base import *

# Import secrets -- not needed
#sys.path.append(
#    abspath(join(PROJECT_ROOT, '../secrets/TimelineJS/stg'))
#)
#from secrets import *

# Set static URL
STATIC_URL = '/static'
LLNL/spack | var/spack/repos/builtin/packages/openslide/package.py | Python | lgpl-2.1 | 803 | 0.003736 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Openslide(AutotoolsPackage):
    """OpenSlide reads whole slide image files."""
    # Repaired dataset-split corruption: the docstring and the sha256
    # literal below were each broken by an embedded ' | ' marker.

    homepage = "https://openslide.org/"
    url = "https://github.com/openslide/openslide/releases/download/v3.4.1/openslide-3.4.1.tar.xz"

    version('3.4.1', sha256='9938034dba7f48fadc90a2cdf8cfe94c5613b04098d1348a5ff19da95b990564')

    # Build tool only.
    depends_on('pkgconfig', type='build')
    # Image codec / format libraries.
    depends_on('openjpeg')
    depends_on('jpeg')
    depends_on('libtiff')
    depends_on('libxml2')
    depends_on('sqlite@3.6:')
    depends_on('glib')
    depends_on('cairo+pdf')
    depends_on('gdk-pixbuf')
radiantflow/django-comments | tests/testapp/tests/feed_tests.py | Python | bsd-3-clause | 1,879 | 0.009047 | from __future__ import absolute_import
from xml.etree import ElementTree as ET
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from comments.models import Comment
from . import CommentTestCase
from ..models import Article
class CommentFeedTests(CommentTestCase):
    """Tests for the comment RSS feed (repaired two lines that carried
    dataset-split ' | ' corruption)."""
    urls = 'testapp.urls'
    feed_url = '/rss/comments/'

    def setUp(self):
        site_2 = Site.objects.create(id=settings.SITE_ID + 1,
                                     domain="example2.com", name="example2.com")
        # A comment for another site: the feed must be restricted to the
        # current site, so this text must never appear in the output.
        Comment.objects.create(
            content_type=ContentType.objects.get_for_model(Article),
            object_pk="1",
            user_name="Joe Somebody",
            user_email="jsomebody@example.com",
            user_url="http://example.com/~joe/",
            comment="A comment for the second site.",
            site=site_2,
        )

    def test_feed(self):
        response = self.client.get(self.feed_url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'],
                         'application/rss+xml; charset=utf-8')

        rss_elem = ET.fromstring(response.content)
        self.assertEqual(rss_elem.tag, "rss")
        self.assertEqual(rss_elem.attrib, {"version": "2.0"})

        channel_elem = rss_elem.find("channel")
        title_elem = channel_elem.find("title")
        self.assertEqual(title_elem.text, "example.com comments")
        link_elem = channel_elem.find("link")
        self.assertEqual(link_elem.text, "http://example.com/")
        atomlink_elem = channel_elem.find("{http://www.w3.org/2005/Atom}link")
        self.assertEqual(atomlink_elem.attrib,
                         {"href": "http://example.com/rss/comments/", "rel": "self"})
        # The other-site comment created in setUp must be excluded.
        self.assertNotContains(response, "A comment for the second site.")
adamsumm/CausalMario | HiddenCauses/rescal.py-master/Mario/parse.py | Python | mit | 2,164 | 0.063771 | import sys
import re
import random
vevent = re.compile("V\s+([\-\d\w]+)\s+(\w+)")
sevent = re.compile("S\s+([\-\d\w]+)\s+([\-\d\w]+)")
aevent = re.compile("A\s+([\-\d\w]+)")
devent = re.compile("D\s+([\-\d\w]{2,})")
cevent = re.compile("C\s+([\-\d\w]+)\s+[\-\d]*\s*([\-\d\w]+)\s+([\-\d\w]+)")
animate = ["Goomba","Mario","BigMario","FireMario","GreenKoopa","RedKoopa"]
enemies = ["Goomba","GreenKoopa","RedKoopa"]
dirs = ["U","D","L","R"]
opposite = {
"U":"D",
"D":"U",
"L":"R",
"R":"L"
}
enemyOdds = 1.0/3200.0
bushOdds = 1.0/3200.0
with open(sys.argv[1],'r') as openfile:
print "ObjectA,ObjectB,A2BDir,EffectType,Source,Target,VelChange"
causes = []
effects = []
for line in openfile:
if 'NEWFRAME' in line:
#print causes
if random.random() < bushOdds:
an = random.choice(animate)
d =random.choice(dirs)
causes.append(["Bush",an,d])
causes.append([an,"Bush",opposite[d]])
if random.random() < enemyOdds:
e1 = random.choice(enemies)
e2 = random.choice(enemies)
d =random.choice(dirs)
causes.append([e1,e2,d])
causes.append([e2,e1,opposite[d]])
if not causes:
pass
#causes.append(["None","None","None" | ])
for cause in causes:
if not effects:
print ",".join(cause) + ",None,None,None,None"
for effect in effects:
print ",".join(cause) + "," + ",".join(effect)
causes = []
effects = []
else:
| amatch = aevent.search(line)
dmatch = devent.search(line)
smatch = sevent.search(line)
cmatch = cevent.search(line)
vmatch = vevent.search(line)
if amatch:
effects.append(["append",amatch.group(1),"None","None"])
if vmatch:
effects.append(["VelChange",vmatch.group(1),"None",vmatch.group(2)])
if smatch:
effects.append(["Change",smatch.group(1),smatch.group(2),"None"])
if dmatch:
if 'RUN' not in line:
effects.append(["Delete",dmatch.group(1),"None","None"])
if cmatch:
o1 = cmatch.group(1)
o2 = cmatch.group(2)
if "-" in o1:
o1 = "B" + o1
if "-" in o2:
o2 = "B" + o2
causes.append([o1,o2,cmatch.group(3)])
causes.append([o2,o1,opposite[cmatch.group(3)]])
|
WillianPaiva/ycmd | ycmd/server_state.py | Python | gpl-3.0 | 4,592 | 0.02635 | #!/usr/bin/env python
#
# Copyright (C) 2013 Google Inc.
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
import imp
import os
import threading
from ycmd.utils import ForceSemanticCompletion
from ycmd.completers.general.general_completer_store import (
GeneralCompleterStore )
from ycmd.completers.completer_utils import PathToFiletypeCompleterPluginLoader
class ServerState( object ):
def __init__( self, user_options ):
self._user_options = user_options
self._filetype_completers = {}
self._filetype_completers_lock = threading.Lock()
self._gencomp = GeneralCompleterStore( self._user_options )
@property
def user_options( self ):
return self._user_options
def Shutdown( self ):
with self._filetype_completers_lock:
for completer in self._filetype_completers.itervalues():
if completer:
completer.Shutdown()
self._gencomp.Shutdown()
def _GetFiletypeCompleterForFiletype( self, filetype ):
with self._filetype_completers_lock:
try:
return self._filetype_completers[ filetype ]
except KeyError:
pass
module_path = PathToFiletypeCompleterPluginLoader( filetype )
completer = None
supported_filetypes = [ filetype ]
if os.path.exists( module_path ):
module = imp.load_source( filetype, module_path )
completer = module.GetCompleter( self._user_options )
if completer:
supported_filetypes.extend( | completer.SupportedFiletypes() )
for supported_filetype in supported_filetypes:
self._filetype_completers[ supported_filetype ] = completer
return completer
def GetFiletypeCompleter( self, current_filetypes ):
completers = [ self._GetFiletypeCompleterForFiletype( filetype )
for filetype in current_filetypes ]
for completer in completers:
if completer:
| return completer
raise ValueError( 'No semantic completer exists for filetypes: {0}'.format(
current_filetypes ) )
def FiletypeCompletionAvailable( self, filetypes ):
try:
self.GetFiletypeCompleter( filetypes )
return True
except:
return False
def FiletypeCompletionUsable( self, filetypes ):
return ( self.CurrentFiletypeCompletionEnabled( filetypes ) and
self.FiletypeCompletionAvailable( filetypes ) )
def ShouldUseGeneralCompleter( self, request_data ):
return self._gencomp.ShouldUseNow( request_data )
def ShouldUseFiletypeCompleter( self, request_data ):
"""
Determines whether or not the semantic completer should be called, and
returns an indication of the reason why. Specifically, returns a tuple:
( should_use_completer_now, was_semantic_completion_forced ), where:
- should_use_completer_now: if True, the semantic engine should be used
- was_semantic_completion_forced: if True, the user requested "forced"
semantic completion
was_semantic_completion_forced is always False if should_use_completer_now
is False
"""
filetypes = request_data[ 'filetypes' ]
if self.FiletypeCompletionUsable( filetypes ):
if ForceSemanticCompletion( request_data ):
# use semantic, and it was forced
return ( True, True )
else:
# was not forced. check the conditions for triggering
return ( self.GetFiletypeCompleter( filetypes ).ShouldUseNow(
request_data ), False )
# don't use semantic, ignore whether or not the user requested forced
# completion
return ( False, False )
def GetGeneralCompleter( self ):
return self._gencomp
def CurrentFiletypeCompletionEnabled( self, current_filetypes ):
filetype_to_disable = self._user_options[
'filetype_specific_completion_to_disable' ]
if '*' in filetype_to_disable:
return False
else:
return not all([ x in filetype_to_disable for x in current_filetypes ])
|
zjuwangg/scrapy | tests/test_pipeline_files.py | Python | bsd-3-clause | 8,013 | 0.002496 | import os
import time
import hashlib
import warnings
from tempfile import mkdtemp
from shutil import rmtree
from twisted.trial import unittest
from twisted.internet import defer
from scrapy.pipelines.files import FilesPipeline, FSFilesStore
from scrapy.item import Item, Field
from scrapy.http import Request, Response
from scrapy.settings import Settings
from scrapy.utils.python import to_bytes
from tests import mock
def _mocked_download_func(request, info):
response = request.meta.get('response')
return response() if callable(response) else response
class FilesPipelineTestCase(unittest.TestCase):
def setUp(self):
self.tempdir = mkdtemp()
self.pipeline = FilesPipeline.from_settings(Settings({'FILES_STORE': self.tempdir}))
self.pipeline.download_func = _mocked_download_func
self.pipeline.open_spider(None)
def tearDown(self):
rmtree(self.tempdir)
def test_file_path(self):
file_path = self.pipeline.file_path
self.assertEqual(file_path(Request("https://dev.mydeco.com/mydeco.pdf")),
'full/c9b564df929f4bc635bdd19fde4f3d4847c757c5.pdf')
self.assertEqual(file_path(Request("http://www.maddiebrown.co.uk///catalogue-items//image_54642_12175_95307.txt")),
'full/4ce274dd83db0368bafd7e406f382ae088e39219.txt')
self.assertEqual(file_path(Request("https://dev.mydeco.com/two/dirs/with%20spaces%2Bsigns.doc")),
'full/94ccc495a17b9ac5d40e3eabf3afcb8c2c9b9e1a.doc')
self.assertEqual(file_path(Request("http://www.dfsonline.co.uk/get_prod_image.php?img=status_0907_mdm.jpg")),
'full/4507be485f38b0da8a0be9eb2e1dfab8a19223f2.jpg')
self.assertEqual(file_path(Request("http://www.dorma.co.uk/images/product_details/2532/")),
'full/97ee6f8a46cbbb418ea91502fd24176865cf39b2')
self.assertEqual(file_path(Request("http://www.dorma.co.uk/images/product_details/2532")),
'full/244e0dd7d96a3b7b01f54eded250c9e272577aa1')
self.assertEqual(file_path(Request("http://www.dorma.co.uk/images/product_details/2532"),
response=Response("http://www.dorma.co.uk/images/product_details/2532"),
info=object()),
'full/244e0dd7d96a3b7b01f54eded250c9e272577aa1')
def test_fs_store(self):
assert isinstance(self.pipeline.store, FSFilesStore)
self.assertEqual(self.pipeline.store.basedir, self.tempdir)
path = 'some/image/key.jpg'
fullpath = os.path.join(self.tempdir, 'some', 'image', 'key.jpg')
self.assertEqual(self.pipeline.store._get_filesystem_path(path), fullpath)
@defer.inlineCallbacks
def test_file_not_expired(self):
item_url = "http://example.com/file.pdf"
item = _create_item_with_files(item_url)
patchers = [
mock.patch.object(FilesPipeline, 'inc_stats', return_value=True),
mock.patch.object(FSFilesStore, 'stat_file', return_value={
'checksum': 'abc', 'last_modified': time.time()}),
mock.patch.object(FilesPipeline, 'get_media_requests',
return_value=[_prepare_request_object(item_url)])
]
for p in patchers:
p.start()
result = yield self.pipeline.process_item(item, None)
| self.assertEqual(result['files'][0]['checksum'], 'abc')
| for p in patchers:
p.stop()
@defer.inlineCallbacks
def test_file_expired(self):
item_url = "http://example.com/file2.pdf"
item = _create_item_with_files(item_url)
patchers = [
mock.patch.object(FSFilesStore, 'stat_file', return_value={
'checksum': 'abc',
'last_modified': time.time() - (FilesPipeline.EXPIRES * 60 * 60 * 24 * 2)}),
mock.patch.object(FilesPipeline, 'get_media_requests',
return_value=[_prepare_request_object(item_url)]),
mock.patch.object(FilesPipeline, 'inc_stats', return_value=True)
]
for p in patchers:
p.start()
result = yield self.pipeline.process_item(item, None)
self.assertNotEqual(result['files'][0]['checksum'], 'abc')
for p in patchers:
p.stop()
class DeprecatedFilesPipeline(FilesPipeline):
def file_key(self, url):
media_guid = hashlib.sha1(to_bytes(url)).hexdigest()
media_ext = os.path.splitext(url)[1]
return 'empty/%s%s' % (media_guid, media_ext)
class DeprecatedFilesPipelineTestCase(unittest.TestCase):
def setUp(self):
self.tempdir = mkdtemp()
def init_pipeline(self, pipeline_class):
self.pipeline = pipeline_class.from_settings(Settings({'FILES_STORE': self.tempdir}))
self.pipeline.download_func = _mocked_download_func
self.pipeline.open_spider(None)
def test_default_file_key_method(self):
self.init_pipeline(FilesPipeline)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.assertEqual(self.pipeline.file_key("https://dev.mydeco.com/mydeco.pdf"),
'full/c9b564df929f4bc635bdd19fde4f3d4847c757c5.pdf')
self.assertEqual(len(w), 1)
self.assertTrue('file_key(url) method is deprecated' in str(w[-1].message))
def test_overridden_file_key_method(self):
self.init_pipeline(DeprecatedFilesPipeline)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.assertEqual(self.pipeline.file_path(Request("https://dev.mydeco.com/mydeco.pdf")),
'empty/c9b564df929f4bc635bdd19fde4f3d4847c757c5.pdf')
self.assertEqual(len(w), 1)
self.assertTrue('file_key(url) method is deprecated' in str(w[-1].message))
def tearDown(self):
rmtree(self.tempdir)
class FilesPipelineTestCaseFields(unittest.TestCase):
def test_item_fields_default(self):
class TestItem(Item):
name = Field()
file_urls = Field()
files = Field()
for cls in TestItem, dict:
url = 'http://www.example.com/files/1.txt'
item = cls({'name': 'item1', 'file_urls': [url]})
pipeline = FilesPipeline.from_settings(Settings({'FILES_STORE': 's3://example/files/'}))
requests = list(pipeline.get_media_requests(item, None))
self.assertEqual(requests[0].url, url)
results = [(True, {'url': url})]
pipeline.item_completed(results, item, None)
self.assertEqual(item['files'], [results[0][1]])
def test_item_fields_override_settings(self):
class TestItem(Item):
name = Field()
files = Field()
stored_file = Field()
for cls in TestItem, dict:
url = 'http://www.example.com/files/1.txt'
item = cls({'name': 'item1', 'files': [url]})
pipeline = FilesPipeline.from_settings(Settings({
'FILES_STORE': 's3://example/files/',
'FILES_URLS_FIELD': 'files',
'FILES_RESULT_FIELD': 'stored_file'
}))
requests = list(pipeline.get_media_requests(item, None))
self.assertEqual(requests[0].url, url)
results = [(True, {'url': url})]
pipeline.item_completed(results, item, None)
self.assertEqual(item['stored_file'], [results[0][1]])
class ItemWithFiles(Item):
file_urls = Field()
files = Field()
def _create_item_with_files(*files):
item = ItemWithFiles()
item['file_urls'] = files
return item
def _prepare_request_object(item_url):
return Request(
item_url,
meta={'response': Response(item_url, status=200, body=b'data')})
if __name__ == "__main__":
unittest.main()
|
youtube/cobalt | third_party/llvm-project/lldb/packages/Python/lldbsuite/test/functionalities/watchpoint/watchpoint_events/TestWatchpointEvents.py | Python | bsd-3-clause | 4,479 | 0.001116 | """Test that adding, deleting and modifying watchpoints sends the appropriate events."""
from __future__ import print_function
import os
import time
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestWatchpointEvents (TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line numbers that we will step to in main:
self.main_source = "main.c"
@add_test_categories(['pyapi'])
@expectedFailureAll(
oslist=["linux"],
archs=["aarch64"],
bugnumber="llvm.org/pr27710")
@expectedFailureAll(
oslist=["windows"],
bugnumber="llvm.org/pr24446: WINDOWS XFAIL TRIAGE - Watchpoints not supported on Windows")
def test_with_python_api(self):
"""Test that adding, deleting and modifying watchpoints sends the appropriate events."""
self.build()
exe = self.getBuildArtifact("a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
self.main_source_spec = lldb | .SBFileSpec(self.main_source)
break_in_main = target.BreakpointCreateBySourceRegex(
'// P | ut a breakpoint here.', self.main_source_spec)
self.assertTrue(break_in_main, VALID_BREAKPOINT)
# Now launch the process, and do not stop at entry point.
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
self.assertTrue(process, PROCESS_IS_VALID)
# The stop reason of the thread should be breakpoint.
threads = lldbutil.get_threads_stopped_at_breakpoint(
process, break_in_main)
if len(threads) != 1:
self.fail("Failed to stop at first breakpoint in main.")
thread = threads[0]
frame = thread.GetFrameAtIndex(0)
local_var = frame.FindVariable("local_var")
self.assertTrue(local_var.IsValid())
self.listener = lldb.SBListener("com.lldb.testsuite_listener")
self.target_bcast = target.GetBroadcaster()
self.target_bcast.AddListener(
self.listener, lldb.SBTarget.eBroadcastBitWatchpointChanged)
self.listener.StartListeningForEvents(
self.target_bcast, lldb.SBTarget.eBroadcastBitWatchpointChanged)
error = lldb.SBError()
local_watch = local_var.Watch(True, False, True, error)
if not error.Success():
self.fail(
"Failed to make watchpoint for local_var: %s" %
(error.GetCString()))
self.GetWatchpointEvent(lldb.eWatchpointEventTypeAdded)
# Now change some of the features of this watchpoint and make sure we
# get events:
local_watch.SetEnabled(False)
self.GetWatchpointEvent(lldb.eWatchpointEventTypeDisabled)
local_watch.SetEnabled(True)
self.GetWatchpointEvent(lldb.eWatchpointEventTypeEnabled)
local_watch.SetIgnoreCount(10)
self.GetWatchpointEvent(lldb.eWatchpointEventTypeIgnoreChanged)
condition = "1 == 2"
local_watch.SetCondition(condition)
self.GetWatchpointEvent(lldb.eWatchpointEventTypeConditionChanged)
self.assertTrue(local_watch.GetCondition() == condition,
'make sure watchpoint condition is "' + condition + '"')
def GetWatchpointEvent(self, event_type):
# We added a watchpoint so we should get a watchpoint added event.
event = lldb.SBEvent()
success = self.listener.WaitForEvent(1, event)
self.assertTrue(success, "Successfully got watchpoint event")
self.assertTrue(
lldb.SBWatchpoint.EventIsWatchpointEvent(event),
"Event is a watchpoint event.")
found_type = lldb.SBWatchpoint.GetWatchpointEventTypeFromEvent(event)
self.assertTrue(
found_type == event_type,
"Event is not correct type, expected: %d, found: %d" %
(event_type,
found_type))
# There shouldn't be another event waiting around:
found_event = self.listener.PeekAtNextEventForBroadcasterWithType(
self.target_bcast, lldb.SBTarget.eBroadcastBitBreakpointChanged, event)
if found_event:
print("Found an event I didn't expect: ", event)
self.assertTrue(not found_event, "Only one event per change.")
|
pombredanne/pants | contrib/go/src/python/pants/contrib/go/subsystems/go_import_meta_tag_reader.py | Python | apache-2.0 | 2,676 | 0.01009 | # coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import re
import requests
from pants.subsystem.subsystem import Subsystem
from pants.util.memo import memoized_method
from pants.contrib.go.subsystems.imported_repo import ImportedRepo
class GoImportMetaTagReader(Subsystem):
"""Implements a reader for the <meta name="go-import"> protocol.
See https://golang.org/cmd/go/#hdr-Remote_import_paths .
"""
options_scope = 'go-import-metatag-reader'
@classmethod
def register_options(cls, register):
super(GoImportMetaTagReader, cls).register_options(register)
register('--retries', type=int, default=1, advanced=True,
help='How many times to retry when fetching meta tags.')
_META_IMPORT_REGEX = re.compile(r"""
<meta
\s+
name=['"]go-import['"]
\s+
content=['"](?P<root>[^\s]+)\s+(?P<vcs>[^\s]+)\s+(?P<url>[^\s]+)['"]
\s*
>""", flags=re.VERBOSE)
@classmethod
def find_meta_tag(cls, page_html):
"""Returns the content of the meta tag if found inside of the provided HTML."""
matched = cls._META_IMPORT_REGEX.search(page_html)
if matched:
return matched.groups()
return None, None, None
@ | memoized_method
def get_imported_repo(self, import_path):
"""Looks for a go-import meta tag for the provided import_path.
Returns an ImportedRepo inst | ance with the information in the meta tag,
or None if no go-import meta tag is found.
"""
try:
session = requests.session()
# TODO: Support https with (optional) fallback to http, as Go does.
# See https://github.com/pantsbuild/pants/issues/3503.
session.mount("http://",
requests.adapters.HTTPAdapter(max_retries=self.get_options().retries))
page_data = session.get('http://{import_path}?go-get=1'.format(import_path=import_path))
except requests.ConnectionError:
return None
if not page_data:
return None
root, vcs, url = self.find_meta_tag(page_data.text)
if root and vcs and url:
# Check to make sure returned root is an exact match to the provided import path. If it is
# not then run a recursive check on the returned and return the values provided by that call.
if root == import_path:
return ImportedRepo(root, vcs, url)
elif import_path.startswith(root):
return self.get_imported_repo(root)
return None
|
mseln/klufweb | klufweb/feed/migrations/0004_auto_20141229_1211.py | Python | apache-2.0 | 1,090 | 0.002752 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.utils.timezone import utc
import datetime
class Migration(migrations.Migration):
dependencies = [
| ('feed', '0003_auto_20141227_2343'),
]
operations = [
migrations.AddField(
model_name='newsarticle',
name= | 'created',
field=models.DateTimeField(default=datetime.datetime(2014, 12, 29, 11, 11, 7, 540368, tzinfo=utc), auto_now_add=True),
preserve_default=False,
),
migrations.AddField(
model_name='newsarticle',
name='slug',
field=models.SlugField(default=datetime.datetime(2014, 12, 29, 11, 11, 29, 101175, tzinfo=utc)),
preserve_default=False,
),
migrations.AddField(
model_name='newsarticle',
name='updated',
field=models.DateTimeField(default=datetime.datetime(2014, 12, 29, 11, 11, 42, 82623, tzinfo=utc), auto_now=True),
preserve_default=False,
),
]
|
zb89/pyrogue | console.py | Python | unlicense | 1,242 | 0.024155 | from unicurses import *
class Console:
de | f __init__(self):
stdscr = initscr()
noecho()
| cbreak()
curs_set(False)
start_color()
use_default_colors()
init_pair( 0, COLOR_WHITE, COLOR_BLACK)
init_pair( 1, COLOR_RED, COLOR_BLACK)
init_pair( 2, COLOR_YELLOW, COLOR_BLACK)
init_pair( 3, COLOR_GREEN, COLOR_BLACK)
init_pair( 4, COLOR_CYAN, COLOR_BLACK)
init_pair( 5, COLOR_BLUE, COLOR_BLACK)
init_pair( 6, COLOR_MAGENTA, COLOR_BLACK)
init_pair( 7, COLOR_WHITE, COLOR_BLACK)
init_pair( 8, COLOR_RED, COLOR_BLACK)
init_pair( 9, COLOR_YELLOW, COLOR_BLACK)
init_pair(10, COLOR_GREEN, COLOR_BLACK)
init_pair(11, COLOR_CYAN, COLOR_BLACK)
init_pair(12, COLOR_BLUE, COLOR_BLACK)
init_pair(13, COLOR_MAGENTA, COLOR_BLACK)
def close(self):
nocbreak()
echo()
endwin()
def clear(self):
refresh()
clear()
def add_char(self, x, y, char):
move(y, x)
addstr(char)
def add_str(self, x, y, char):
move(y, x)
addstr(char)
def setcolor(self, n):
attron(color_pair(n) )
def unsetcolor(self, n):
attroff(color_pair(n) )
def setbold(self):
attron(A_BOLD)
def unsetbold(self):
attroff(A_BOLD)
|
DavidMcDonald1993/ghsom | laplacian.py | Python | gpl-2.0 | 376 | 0.007979 |
# codi | ng: utf-8
# In[9]:
import numpy as np
import networkx as nx
# In[2]:
G = nx.karate_club_graph()
# In[3]:
L = nx.laplacian_matrix(G)
# In[4]:
L
# In[11]:
d = np.array([v for k,v in nx.degr | ee(G).items()])
# In[12]:
d
# In[14]:
L.dot(d)
# In[16]:
e = np.array([v for k,v in nx.betweenness_centrality(G).items()])
# In[17]:
L.dot(e)
# In[ ]:
|
JeffRoy/mi-dataset | mi/dataset/driver/adcps_jln/stc/test/test_adcps_jln_stc_recovered_driver.py | Python | bsd-2-clause | 1,063 | 0.007526 |
__author__ = 'Mark Worden'
from mi.core.log import get_logger
log = get_logger()
from mi.idk.config import Config
import unittest
import os
from mi.dataset.driver.adcps_jln.stc.adcps_jln_stc_recovered_driver import parse
from mi.dataset.dataset_driver import ParticleDataHandler
class SampleTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_one(self):
sourceFilePath = os.path.join('mi', 'dataset', 'driver',
'adcps_jln', 'stc', 're | source',
'adcpt_20130929_091817.DAT')
particle_data_hdlr_obj = ParticleDataHandler()
particle_data_hdlr_obj = parse(Config().base_dir(), sourceFilePat | h, particle_data_hdlr_obj)
log.debug("SAMPLES: %s", particle_data_hdlr_obj._samples)
log.debug("FAILURE: %s", particle_data_hdlr_obj._failure)
self.assertEquals(particle_data_hdlr_obj._failure, False)
if __name__ == '__main__':
test = SampleTest('test_one')
test.test_one() |
barentsen/iphas-dr2 | scripts/summary.py | Python | mit | 1,682 | 0.001784 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Prints a summary of the contents of the IPHAS source catalogue.
"""
import os
from astropy.io import fits
from astropy import log
import numpy as np
import sys
from dr2 import constants
n_sources = 0
n_r20 = 0
n_reliable = 0
n_deblend = 0
n_reliable_deblend = 0
n_pair = 0
n_saturated = 0
n_brightNeighb = 0
path = os.path.join(constants.DESTINATION, 'concatenated', 'full')
for filename in os.listdir(path):
if filename.endswith('fits.gz'):
print filename
myfile = os.path.join(path, filename)
log.info(myfile)
f = fits.open(myfile)
#n_sources += f[1].header['NAXIS2']
n_sources += f[1].data['ra'].size
n_r20 += (f[1].data['r'] < 21).sum()
n_reliable += f[1].data['reliable'].sum()
n_reliable_deblend += (f[1].data['reliable'] & f[1].data['deblend']).sum()
n_deblend += f[1].data['deblend'].sum()
n_pair += (f[1].data['sourceID2'] != ' ').sum()
n_saturated += f[1].data['saturated'].sum()
n_brightNeighb += f[1].data['b | rightNeighb'].sum()
print "{0} sources so far".format(n_sources)
with open('summary.txt', 'w') as out:
out.write("#Unique sources: {0}\n".format(n_sourc | es))
out.write("#Sources r < 21: {0}\n".format(n_r20))
out.write("#Reliable sources: {0}\n".format(n_reliable))
out.write("#Deblend sources: {0}\n".format(n_deblend))
out.write("#Reliable+deblend: {0}\n".format(n_reliable_deblend))
out.write("#Paired sources: {0}\n".format(n_pair))
out.write("#Saturated sources: {0}\n".format(n_saturated))
out.write("#Bright neighb sources: {0}\n".format(n_brightNeighb))
|
sametmax/Django--an-app-at-a-time | ignore_this_directory/django/templatetags/tz.py | Python | mit | 5,400 | 0.000185 | from datetime import datetime, tzinfo
import pytz
from django.template import Library, Node, TemplateSyntaxError
from django.utils import timezone
register = Library()
# HACK: datetime instances cannot be assigned new attributes. Define a subclass
# in order to define new attributes in do_timezone().
class datetimeobject(datetime):
pass
# Template filters
@register.filter
def localtime(value):
"""
Convert a datetime to local time in the active time zone.
This only makes sense within a {% localtime off %} block.
"""
return do_timezone(value, timezone.get_current_timezone())
@register.filter
def utc(value):
"""
Convert a datetime to UTC.
"""
return do_timezone(value, timezone.utc)
@register.filter('timezone')
def do_timezone(value, arg):
"""
Convert a datetime to local time in a given time zone.
The argument must be an instance of a tzinfo subclass or a time zone name.
Naive datetimes are assumed to be in local time in the default time zone.
"""
if not isinstance(value, datetime):
return ''
# Obtain a timezone-aware datetime
try:
if timezone.is_naive(value):
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
# Filters must never raise exceptions, and pytz' exceptions inherit
# Exception directly, not a specific subclass. So catch everything.
except Exception:
return ''
# Obtain a tzinfo instance
if isinstance(arg, tzinfo):
tz = arg
elif isinstance(arg, str):
try:
tz = pytz.timezone(arg)
except pytz.UnknownTimeZoneError:
return ''
else:
return ''
result = timezone.localtime(value, tz)
# HACK: the convert_to_local_time flag will prevent
# | automatic conversion of the value to local time.
result = datetimeobject(result.year, result.month, result.day,
result.hour, result.minute, result.second,
result.microsecond, result.tzinfo)
result.convert_to_local_time = False
return result
# Template tags
class LocalTimeNode(Node):
"""
Template node class used by ``localtime_tag``.
"""
def __ini | t__(self, nodelist, use_tz):
self.nodelist = nodelist
self.use_tz = use_tz
def render(self, context):
old_setting = context.use_tz
context.use_tz = self.use_tz
output = self.nodelist.render(context)
context.use_tz = old_setting
return output
class TimezoneNode(Node):
"""
Template node class used by ``timezone_tag``.
"""
def __init__(self, nodelist, tz):
self.nodelist = nodelist
self.tz = tz
def render(self, context):
with timezone.override(self.tz.resolve(context)):
output = self.nodelist.render(context)
return output
class GetCurrentTimezoneNode(Node):
"""
Template node class used by ``get_current_timezone_tag``.
"""
def __init__(self, variable):
self.variable = variable
def render(self, context):
context[self.variable] = timezone.get_current_timezone_name()
return ''
@register.tag('localtime')
def localtime_tag(parser, token):
"""
Force or prevent conversion of datetime objects to local time,
regardless of the value of ``settings.USE_TZ``.
Sample usage::
{% localtime off %}{{ value_in_utc }}{% endlocaltime %}
"""
bits = token.split_contents()
if len(bits) == 1:
use_tz = True
elif len(bits) > 2 or bits[1] not in ('on', 'off'):
raise TemplateSyntaxError("%r argument should be 'on' or 'off'" %
bits[0])
else:
use_tz = bits[1] == 'on'
nodelist = parser.parse(('endlocaltime',))
parser.delete_first_token()
return LocalTimeNode(nodelist, use_tz)
@register.tag('timezone')
def timezone_tag(parser, token):
"""
Enable a given time zone just for this block.
The ``timezone`` argument must be an instance of a ``tzinfo`` subclass, a
time zone name, or ``None``. If it is ``None``, the default time zone is
used within the block.
Sample usage::
{% timezone "Europe/Paris" %}
It is {{ now }} in Paris.
{% endtimezone %}
"""
bits = token.split_contents()
if len(bits) != 2:
raise TemplateSyntaxError("'%s' takes one argument (timezone)" %
bits[0])
tz = parser.compile_filter(bits[1])
nodelist = parser.parse(('endtimezone',))
parser.delete_first_token()
return TimezoneNode(nodelist, tz)
@register.tag("get_current_timezone")
def get_current_timezone_tag(parser, token):
"""
Store the name of the current time zone in the context.
Usage::
{% get_current_timezone as TIME_ZONE %}
This will fetch the currently active time zone and put its name
into the ``TIME_ZONE`` context variable.
"""
# token.split_contents() isn't useful here because this tag doesn't accept variable as arguments
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_current_timezone' requires "
"'as variable' (got %r)" % args)
return GetCurrentTimezoneNode(args[2])
|
nirs/vdsm | tests/network/functional/conftest.py | Python | gpl-2.0 | 3,091 | 0 | #
# Copyright 2018-2020 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to th | e Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Fl | oor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from contextlib import contextmanager
from unittest import mock
import pytest
from . import netfunctestlib as nftestlib
from .netfunctestlib import NetFuncTestAdapter
from .netfunctestlib import Target
from vdsm.network import initializer
from vdsm.network.dhcp_monitor import MonitoredItemPool
def pytest_addoption(parser):
parser.addoption(
'--target-service', action='store_const', const=Target.SERVICE
)
parser.addoption('--target-lib', action='store_const', const=Target.LIB)
parser.addoption(
'--skip-stable-link-monitor', action='store_const', const=True
)
@pytest.fixture(scope='session', autouse=True)
def adapter(target):
yield NetFuncTestAdapter(target)
@pytest.fixture(scope='session', autouse=True)
def target(request):
target_lib = request.config.getoption('--target-lib')
target_service = request.config.getoption('--target-service')
if target_lib is None and target_service is None:
target_proxy = Target.SERVICE
elif target_lib == Target.LIB and target_service == Target.SERVICE:
raise Exception("error")
elif target_service == Target.SERVICE:
target_proxy = Target.SERVICE
elif target_lib == Target.LIB:
target_proxy = Target.LIB
return target_proxy
@pytest.fixture(scope='session', autouse=True)
def init_lib():
initializer.init_privileged_network_components()
@pytest.fixture(scope='session')
def skip_stable_link_monitor(request):
return request.config.getoption(
'--skip-stable-link-monitor', default=False
)
@pytest.fixture(scope='session', autouse=True)
def patch_stable_link_monitor(skip_stable_link_monitor):
if skip_stable_link_monitor:
with mock.patch.object(
nftestlib, 'monitor_stable_link_state', nullcontext
):
yield
return
yield
@pytest.fixture(scope='function', autouse=True)
def clear_monitor_pool():
yield
pool = MonitoredItemPool.instance()
if not pool.is_pool_empty():
# Some tests are not able to clear the pool
# (without running dhcp server).
# The same applies if the waiting for dhcp monitor times out.
pool.clear_pool()
@contextmanager
def nullcontext(*args, **kwargs):
yield
|
sheeprine/khal | tests/event_test.py | Python | mit | 11,219 | 0.000357 | # vim: set fileencoding=utf-8 :
import datetime
import textwrap
import pytest
import pytz
from khal.khalendar.event import Event
from .aux import normalize_component
def test_normalize_component():
    # Components differing only inside a property value (TZID 'Europe' vs
    # the misspelled 'Oyrope') must NOT normalize to the same representation.
    assert normalize_component(textwrap.dedent("""
    BEGIN:VEVENT
    DTSTART;TZID=Europe/Berlin;VALUE=DATE-TIME:20140409T093000
    END:VEVENT
    """)) != normalize_component(textwrap.dedent("""
    BEGIN:VEVENT
    DTSTART;TZID=Oyrope/Berlin;VALUE=DATE-TIME:20140409T093000
    END:VEVENT
    """))
# Shared fixtures: timezones, reference dates and an all-day event template.
berlin = pytz.timezone('Europe/Berlin')
# the lucky people in Bogota don't know the pain that is DST
bogota = pytz.timezone('America/Bogota')
today = datetime.date.today()
yesterday = today - datetime.timedelta(days=1)
tomorrow = today + datetime.timedelta(days=1)
# The two {} placeholders are the DTSTART and DTEND dates (YYYYMMDD).
event_allday_template = u"""BEGIN:VEVENT
SEQUENCE:0
UID:uid3@host1.com
DTSTART;VALUE=DATE:{}
DTEND;VALUE=DATE:{}
SUMMARY:a meeting
DESCRIPTION:short description
LOCATION:LDB Lobby
END:VEVENT"""
# A plain timed event in Europe/Berlin, plus the full VCALENDAR (with the
# Berlin VTIMEZONE) it is expected to serialize to.
event_dt = """BEGIN:VEVENT
SUMMARY:An Event
DTSTART;TZID=Europe/Berlin;VALUE=DATE-TIME:20140409T093000
DTEND;TZID=Europe/Berlin;VALUE=DATE-TIME:20140409T103000
DTSTAMP;VALUE=DATE-TIME:20140401T234817Z
UID:V042MJ8B3SJNFXQOJL6P53OFMHJE8Z3VZWOU
END:VEVENT"""
cal_dt = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VTIMEZONE
TZID:Europe/Berlin
BEGIN:STANDARD
RDATE:20151025T020000
DTSTART;VALUE=DATE-TIME:20141026T020000
TZNAME:CET
TZOFFSETFROM:+0200
TZOFFSETTO:+0100
END:STANDARD
BEGIN:DAYLIGHT
DTSTART;VALUE=DATE-TIME:20140330T030000
RDATE:20150329T030000
TZNAME:CEST
TZOFFSETFROM:+0100
TZOFFSETTO:+0200
END:DAYLIGHT
END:VTIMEZONE
BEGIN:VEVENT
SUMMARY:An Event
DTSTART;TZID=Europe/Berlin;VALUE=DATE-TIME:20140409T093000
DTEND;TZID=Europe/Berlin;VALUE=DATE-TIME:20140409T103000
DTSTAMP;VALUE=DATE-TIME:20140401T234817Z
UID:V042MJ8B3SJNFXQOJL6P53OFMHJE8Z3VZWOU
END:VEVENT
END:VCALENDAR
""".strip()
# The STANDARD/DAYLIGHT sub-components above, as lists of raw ical lines.
cal_dt_cet = [b'BEGIN:STANDARD',
              b'DTSTART;VALUE=DATE-TIME:20141026T020000',
              b'TZNAME:CET',
              b'TZOFFSETFROM:+0200',
              b'TZOFFSETTO:+0100',
              b'END:STANDARD'
              ]
cal_dt_cest = [b'BEGIN:DAYLIGHT',
               b'DTSTART;VALUE=DATE-TIME:20140330T030000',
               b'RDATE:20150329T030000',
               b'TZNAME:CEST',
               b'TZOFFSETFROM:+0100',
               b'TZOFFSETTO:+0200',
               b'END:DAYLIGHT',
               ]
# An event whose start and end carry different TZIDs (Berlin vs. New York),
# plus the VCALENDAR containing both VTIMEZONE definitions.
event_dt_two_tz = """BEGIN:VEVENT
SUMMARY:An Event
DTSTART;TZID=Europe/Berlin;VALUE=DATE-TIME:20140409T093000
DTEND;TZID=America/New_York;VALUE=DATE-TIME:20140409T103000
DTSTAMP;VALUE=DATE-TIME:20140401T234817Z
UID:V042MJ8B3SJNFXQOJL6P53OFMHJE8Z3VZWOU
END:VEVENT"""
cal_dt_two_tz = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VTIMEZONE
TZID:Europe/Berlin
BEGIN:STANDARD
DTSTART;VALUE=DATE-TIME:20141026T020000
TZNAME:CET
TZOFFSETFROM:+0200
TZOFFSETTO:+0100
RDATE:20151025T020000
END:STANDARD
BEGIN:DAYLIGHT
DTSTART;VALUE=DATE-TIME:20140330T030000
RDATE:20150329T030000
TZNAME:CEST
TZOFFSETFROM:+0100
TZOFFSETTO:+0200
END:DAYLIGHT
END:VTIMEZONE
BEGIN:VTIMEZONE
TZID:America/New_York
BEGIN:STANDARD
DTSTART;VALUE=DATE-TIME:20141102T010000
RDATE:20151101T010000
TZNAME:EST
TZOFFSETFROM:-0400
TZOFFSETTO:-0500
END:STANDARD
BEGIN:DAYLIGHT
DTSTART;VALUE=DATE-TIME:20140309T030000
RDATE:20150308T030000
TZNAME:EDT
TZOFFSETFROM:-0500
TZOFFSETTO:-0400
END:DAYLIGHT
END:VTIMEZONE
BEGIN:VEVENT
SUMMARY:An Event
DTSTART;TZID=Europe/Berlin;VALUE=DATE-TIME:20140409T093000
DTEND;TZID=America/New_York;VALUE=DATE-TIME:20140409T103000
DTSTAMP;VALUE=DATE-TIME:20140401T234817Z
UID:V042MJ8B3SJNFXQOJL6P53OFMHJE8Z3VZWOU
END:VEVENT
END:VCALENDAR
""".strip()
event_no_dst = """
BEGIN:VEVENT
SUMMARY:An Event
DTSTART;TZID=America/Bogota;VALUE=DATE-TIME:20140409T093000
DTEND;TZID=America/Bogota;VALUE=DATE-TIME:20140409T103000
DTSTAMP;VALUE=DATE-TIME:20140401T234817Z
UID:event_no_dst
END:VEVENT
"""
cal_no_dst = u"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VTIMEZONE
TZID:America/Bogota
BEGIN:STANDARD
DTSTART;VALUE=DATE-TIME:19930403T230000
TZNAME:COT
TZOFFSETFRO | M:-0400
TZOFFSETTO:-0500
END:STANDARD
END:VTIMEZONE
BEGIN:VEVENT
SUMMARY:An Event
DTSTART;TZID=America/Bogota;VALUE=DATE-TIME:20140409T093000
DTEND;TZID=America/Bogota;VALUE=DATE-TIME:20140409T103000
DTSTAMP;VALUE=DATE-TIME:20140401T234817Z
UID:event_no_dst
END:VEVENT
| END:VCALENDAR
""".split('\n')
# Variations on the basic event: DURATION instead of DTEND, no TZID,
# recurrence rule/date, multi-day spans, and all-day (VALUE=DATE) events.
event_dt_duration = """BEGIN:VEVENT
SUMMARY:An Event
DTSTART;TZID=Europe/Berlin;VALUE=DATE-TIME:20140409T093000
DURATION:PT1H0M0S
DTSTAMP;VALUE=DATE-TIME:20140401T234817Z
UID:V042MJ8B3SJNFXQOJL6P53OFMHJE8Z3VZWOU
END:VEVENT"""
event_dt_no_tz = """BEGIN:VEVENT
SUMMARY:An Event
DTSTART;VALUE=DATE-TIME:20140409T093000
DTEND;VALUE=DATE-TIME:20140409T103000
DTSTAMP;VALUE=DATE-TIME:20140401T234817Z
UID:V042MJ8B3SJNFXQOJL6P53OFMHJE8Z3VZWOU
END:VEVENT"""
event_dt_rr = """BEGIN:VEVENT
SUMMARY:An Event
DTSTART;VALUE=DATE-TIME:20140409T093000
DTEND;VALUE=DATE-TIME:20140409T103000
RRULE:FREQ=DAILY;COUNT=10
DTSTAMP;VALUE=DATE-TIME:20140401T234817Z
UID:V042MJ8B3SJNFXQOJL6P53OFMHJE8Z3VZWOU
END:VEVENT"""
event_dt_rd = """BEGIN:VEVENT
SUMMARY:An Event
DTSTART;VALUE=DATE-TIME:20140409T093000
DTEND;VALUE=DATE-TIME:20140409T103000
RDATE;VALUE=DATE-TIME:20140410T093000
DTSTAMP;VALUE=DATE-TIME:20140401T234817Z
UID:V042MJ8B3SJNFXQOJL6P53OFMHJE8Z3VZWOU
END:VEVENT"""
event_dt_long = """BEGIN:VEVENT
SUMMARY:An Event
DTSTART;VALUE=DATE-TIME:20140409T093000
DTEND;VALUE=DATE-TIME:20140412T103000
DTSTAMP;VALUE=DATE-TIME:20140401T234817Z
UID:V042MJ8B3SJNFXQOJL6P53OFMHJE8Z3VZWOU
END:VEVENT"""
event_d = """BEGIN:VEVENT
SUMMARY:Another Event
DTSTART;VALUE=DATE:20140409
DTEND;VALUE=DATE:20140410
DTSTAMP;VALUE=DATE-TIME:20140401T234817Z
UID:V042MJ8B3SJNFXQOJL6P53OFMHJE8Z3VZWOU
END:VEVENT"""
event_d_long = """BEGIN:VEVENT
SUMMARY:Another Event
DTSTART;VALUE=DATE:20140409
DTEND;VALUE=DATE:20140412
DTSTAMP;VALUE=DATE-TIME:20140401T234817Z
UID:V042MJ8B3SJNFXQOJL6P53OFMHJE8Z3VZWOU
END:VEVENT"""
event_d_rr = """BEGIN:VEVENT
SUMMARY:Another Event
DTSTART;VALUE=DATE:20140409
DTEND;VALUE=DATE:20140410
RRULE:FREQ=DAILY;COUNT=10
DTSTAMP;VALUE=DATE-TIME:20140401T234817Z
UID:V042MJ8B3SJNFXQOJL6P53OFMHJE8Z3VZWOU
END:VEVENT"""
# Expected serialization of event_d, pre-split into a list of lines.
cal_d = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
SUMMARY:Another Event
DTSTART;VALUE=DATE:20140409
DTEND;VALUE=DATE:20140410
DTSTAMP;VALUE=DATE-TIME:20140401T234817Z
UID:V042MJ8B3SJNFXQOJL6P53OFMHJE8Z3VZWOU
END:VEVENT
END:VCALENDAR
""".split('\n')
# Locale settings shared by every Event constructed in the tests below.
locale = {
    'default_timezone': berlin,
    'local_timezone': berlin,
    'dateformat': '%d.%m.',
    'timeformat': '%H:%M',
    'longdateformat': '%d.%m.%Y',
    'datetimeformat': '%d.%m. %H:%M',
    'longdatetimeformat': '%d.%m.%Y %H:%M',
}
event_kwargs = {'calendar': 'foobar', 'locale': locale}
def test_raw_dt():
    # Round-trip: the raw ical of a timed event must normalize to cal_dt,
    # and the human-readable renderings must use the Berlin local time.
    event = Event(event_dt, **event_kwargs)
    assert normalize_component(event.raw) == normalize_component(cal_dt)
    assert event.compact(datetime.date(2014, 4, 9)) == u'09:30-10:30: An Event'
    event = Event(event_dt, unicode_symbols=False, **event_kwargs)
    assert event.compact(datetime.date(2014, 4, 9)) == u'09:30-10:30: An Event'
    assert event.long() == u'09:30-10:30 09.04.2014: An Event'
    assert event.recur is False
def test_raw_d():
    # All-day events render without any time component.
    event = Event(event_d, **event_kwargs)
    assert event.raw.split('\r\n') == cal_d
    assert event.compact(datetime.date(2014, 4, 9)) == u'Another Event'
    assert event.long() == u'09.04.2014: Another Event'
def test_dt_two_tz():
    # Start in Berlin, end in New York: both timestamps must be rendered in
    # the configured local timezone (Berlin), hence 09:30-16:30.
    event = Event(event_dt_two_tz, **event_kwargs)
    assert normalize_component(cal_dt_two_tz) == normalize_component(event.raw)
    # local (Berlin) time!
    assert event.compact(datetime.date(2014, 4, 9)) == u'09:30-16:30: An Event'
    assert event.long() == u'09:30-16:30 09.04.2014: An Event'
def test_event_dt_duration():
"""event has no end, but duration"""
event = Event(event_dt_duration, **event_kwargs)
assert event.compact(datetime.date(2014, 4, 9)) == u'09:30-10:30: An Event'
assert event.end == berlin.localize(datetime.datetime |
pastgift/seed-website-py | app/models.py | Python | mit | 4,907 | 0.004687 | # -*- coding: utf-8 -*-
import uuid
import hashlib
from datetime import datetime
from flask import current_app, request
from flask.ext.login import UserMixin, AnonymousUserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from app.exceptions import ValidationError
from . import db, login_manager
class User(db.Model):
    """Account record persisted in ``tb_users``.

    Implements the flask-login user interface (is_active, get_id, ...) plus
    password hashing, simple ACL checks and token based authentication.
    """
    __tablename__ = 'tb_users'
    id = db.Column(db.String(64), primary_key=True)
    email = db.Column(db.String(64), unique=True, index=True)
    password_hash = db.Column(db.String(128))
    name = db.Column(db.UnicodeText(64))
    status = db.Column(db.String(64), default='normal')
    last_seen = db.Column(db.DateTime())
    created_timestamp = db.Column(db.DateTime(), default=db.func.now())
    updated_timestamp = db.Column(db.DateTime(), default=db.func.now(), onupdate=db.func.now())

    def __init__(self, **kwargs):
        super(User, self).__init__(**kwargs)

    @property
    def is_active(self):
        # flask-login hook: only accounts in 'normal' status may log in.
        return self.status == 'normal'

    @property
    def is_authenticated(self):
        return self.is_active

    @property
    def is_anonymous(self):
        return False

    def get_id(self):
        try:
            return unicode(self.id)
        except AttributeError:
            raise NotImplementedError("No `id` attribute - override get_id")

    @property
    def password(self):
        # The plaintext password is never stored and never readable.
        raise AttributeError('Can not get password')

    @password.setter
    def password(self, password):
        self.password_hash = generate_password_hash(password)

    def verify_password(self, password):
        """Check *password* against the stored hash."""
        return check_password_hash(self.password_hash, password)

    def is_admin(self):
        return self.email == current_app.config['ADMIN_EMAIL']

    def ping(self):
        """Record the current UTC time as the user's last-seen timestamp."""
        self.last_seen = datetime.utcnow()
        db.session.add(self)
        db.session.commit()

    def can(self, action):
        """Return True if this user may perform *action*."""
        if self.is_admin() and action in current_app.config['ADMIN_DEFAULT_ACL_ACTIONS']:
            return True
        if UserAcl.query.filter_by(user_id=self.id, action=action).first():
            return True
        return False

    def can_any(self, *actions):
        """True if the user may perform at least one of *actions*."""
        return any(self.can(action) for action in actions)

    def can_all(self, *actions):
        """True if the user may perform every one of *actions*."""
        return all(self.can(action) for action in actions)

    @staticmethod
    def new(**kwargs):
        """Build an unsaved User with a freshly generated hex UUID id."""
        kwargs['id'] = uuid.uuid4().hex
        return User(**kwargs)

    def generate_auth_token(self, expiration):
        """Return a signed token embedding this user's id.

        @expiration: token lifetime in seconds.
        """
        s = Serializer(current_app.config['SECRET_KEY'],
                       expires_in=expiration)
        return s.dumps({'id': self.id}).decode('ascii')

    @staticmethod
    def verify_auth_token(token):
        """Return the User encoded in *token*, or None if it is invalid.

        BUG FIX: the previous bare ``except:`` also swallowed
        SystemExit/KeyboardInterrupt; catch ordinary exceptions only.
        """
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except Exception:
            # Bad signature, expired token or malformed payload.
            return None
        return User.query.get(data['id'])

    def __repr__(self):
        return '<User %r>' % self.email
class AnonymousUser(AnonymousUserMixin):
    """Stand-in for unauthenticated requests: never admin, no permissions."""
    def can(self, *args, **kwargs):
        # Anonymous users fail every permission check.
        return False
    def is_admin(self):
        return False
    # Every flavour of ACL query resolves to the same denial.
    can_any = can
    can_all = can
# Tell flask-login to use AnonymousUser for requests without a session.
login_manager.anonymous_user = AnonymousUser
@login_manager.user_loader
def load_user(user_id):
    """flask-login callback: fetch a User by primary key."""
    user = User.query.get(user_id)
    return user
class UserAcl(db.Model):
    """A single (user, action) permission grant in ``tb_user_acl``."""
    __tablename__ = 'tb_user_acl'
    id = db.Column(db.String(64), primary_key=True)
    user_id = db.Column(db.String(64))
    action = db.Column(db.String(128))
    created_timestamp = db.Column(db.DateTime(), default=db.func.now())
    updated_timestamp = db.Column(db.DateTime(), default=db.func.now(), onupdate=db.func.now())
    def __init__(self, **kwargs):
        super(UserAcl, self).__init__(**kwargs)
    @staticmethod
    def new(**kwargs):
        """Build an unsaved UserAcl with a generated hex UUID id."""
        fields = dict(kwargs, id=uuid.uuid4().hex)
        return UserAcl(**fields)
    def __repr__(self):
        return '<UserAcl %r, %r>' % (self.user_id, self.action)
class OperationRecord(db.Model):
    """Audit-log row recording an operation performed by a user."""
    __tablename__ = 'tb_operation_records'
    id = db.Column(db.String(64), primary_key=True)
    user_id = db.Column(db.String(64))
    operation_note = db.Column(db.Text())
    created_timestamp = db.Column(db.DateTime(), default=db.func.now())
    updated_timestamp = db.Column(db.DateTime(), default=db.func.now(), onupdate=db.func.now())
    def __init__(self, **kwargs):
        super(OperationRecord, self).__init__(**kwargs)
    @staticmethod
    def new(**kwargs):
        """Build an unsaved OperationRecord with a generated hex UUID id."""
        fields = dict(kwargs, id=uuid.uuid4().hex)
        return OperationRecord(**fields)
    def __repr__(self):
        return '<OperationRecord %r>' % self.user_id
|
rebase-helper/rebase-helper | containers/integration.py | Python | gpl-2.0 | 5,136 | 0.000974 | import cgi
import hashlib
import http.server
import io
import os
import posixpath
import ssl
import threading
import time
import urllib.parse
import pyftpdlib.authorizers
import pyftpdlib.handlers
import pyftpdlib.servers
class FTPServer:
    """Anonymous FTP test server rooted at *root*.

    When *report_size* is False the SIZE command is removed from the
    protocol table, so clients cannot learn file sizes in advance.
    """
    def __init__(self, port, root, report_size):
        class FTPHandlerNoSIZE(pyftpdlib.handlers.FTPHandler):
            # Same command table as the stock handler, minus SIZE.
            proto_cmds = {
                cmd: spec
                for cmd, spec in pyftpdlib.handlers.proto_cmds.items()
                if cmd != 'SIZE'
            }
        authorizer = pyftpdlib.authorizers.DummyAuthorizer()
        authorizer.add_anonymous(root)
        if report_size:
            handler = pyftpdlib.handlers.FTPHandler
        else:
            handler = FTPHandlerNoSIZE
        handler.authorizer = authorizer
        self.server = pyftpdlib.servers.FTPServer(('', port), handler)
    def serve(self):
        self.server.serve_forever()
class HTTPServer:
    """HTTP(S) test server serving files from *root*.

    *cert* (path to a PEM file) switches the listener to TLS; *report_size*
    controls whether GET responses carry a Content-Length header.  POSTed
    uploads are verified against the checksum supplied alongside the file.
    """
    def __init__(self, port, cert, root, report_size):
        class RequestHandler(http.server.BaseHTTPRequestHandler):
            def do_GET(self):
                # Strip query string and fragment, then resolve the request
                # path relative to the served root directory.
                path = self.path.split('?', 1)[0].split('#', 1)[0]
                path = urllib.parse.unquote(path)
                path = posixpath.normpath(path)
                path = os.path.join(root, path.lstrip('/'))
                try:
                    with open(path, 'rb') as f:
                        data = f.read()
                    self.send_response(200)
                    content_type = 'application/json' if 'versioneers' in path else 'application/octet-stream'
                    self.send_header('Content-Type', content_type)
                    self.send_header('Content-Transfer-Encoding', 'binary')
                    if report_size:
                        self.send_header('Content-Length', len(data))
                    self.end_headers()
                    self.wfile.write(data)
                except FileNotFoundError:
                    self.send_error(404)
            def do_POST(self):
                def dechunk(f):
                    """Reassemble a chunked transfer-encoded body into BytesIO."""
                    bio = io.BytesIO()
                    while True:
                        chunksize = bytearray()
                        while not chunksize.endswith(b'\r\n'):
                            chunksize += f.read(1)
                        chunksize = chunksize.decode().split(':')[0]
                        chunksize = int(chunksize, 16)
                        if chunksize == 0:
                            break
                        chunk = f.read(chunksize)
                        assert(f.read(2) == b'\r\n')
                        bio.write(chunk)
                    bio.seek(0)
                    return bio
                def verify_hash(f, hashtype, hsh):
                    """True if f's digest under *hashtype* equals *hsh*."""
                    try:
                        chksum = hashlib.new(hashtype)
                    except ValueError:
                        # Unknown hash algorithm name.
                        return False
                    chksum.update(f.read())
                    return chksum.hexdigest() == hsh
                if self.headers.get('Transfer-Encoding') == 'chunked':
                    fp = dechunk(self.rfile)
                else:
                    fp = self.rfile
                # NOTE(review): cgi is deprecated (removed in Python 3.13);
                # a future cleanup should move to email.message or multipart.
                data = cgi.FieldStorage(fp=fp, headers=self.headers,
                                        environ={'REQUEST_METHOD': 'POST'},
                                        # accept maximum of 10MB of data
                                        limit=10 * 1024 * 1024)
                try:
                    if 'filename' in data:
                        resp = b'Missing'
                        self.send_response(200)
                        self.send_header('Content-Type', 'text/plain')
                        self.send_header('Content-Length', len(resp))
                        self.end_headers()
                        self.wfile.write(resp)
                    else:
                        # The form carries e.g. 'md5sum'/'sha1sum' with the
                        # expected digest; recover the algorithm name from it.
                        hashtype = [k for k in data.keys() if k.endswith('sum')][0]
                        hsh = data[hashtype].value
                        hashtype = hashtype.split('sum')[0]
                        if verify_hash(data['file'].file, hashtype, hsh):
                            self.send_response(204)
                            self.end_headers()
                        else:
                            self.send_error(500)
                except (KeyError, IndexError):
                    self.send_error(400)
        self.server = http.server.HTTPServer(('', port), RequestHandler)
        if cert:
            # BUG FIX: ssl.wrap_socket() was deprecated in 3.7 and removed in
            # Python 3.12; use an explicit server-side SSLContext instead.
            context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
            context.load_cert_chain(certfile=cert)
            self.server.socket = context.wrap_socket(self.server.socket,
                                                     server_side=True)
    def serve(self):
        self.server.serve_forever()
def main():
    """Instantiate every test server and run each one in a daemon thread."""
    servers = [
        FTPServer(2100, '/srv', True),
        FTPServer(2101, '/srv', False),
        HTTPServer(8000, None, '/srv', True),
        HTTPServer(8001, None, '/srv', False),
        HTTPServer(4430, '/cert.pem', '/srv', True),
        HTTPServer(4431, '/cert.pem', '/srv', False),
    ]
    # BUG FIX: the first server (FTP on 2100) was previously skipped
    # (threads were built from servers[1:]) and therefore never served.
    # Also use the `daemon` attribute instead of the deprecated setDaemon().
    threads = [threading.Thread(target=s.serve, daemon=True) for s in servers]
    for t in threads:
        t.start()
    try:
        # Idle forever; the daemon threads die with the process.
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        pass
if __name__ == '__main__':
    main()
|
akanimax/CL-3_lab_2017 | Assignment_B2/Plag_checker.py | Python | gpl-3.0 | 2,002 | 0.050949 | #from app import app, lm
#from app import socketio
from flask import request, redirect, render_template, url_for, flash
from flask.ext.wtf import Form
from wtforms import StringField
from wtforms.validators import DataRequired
import os
from flask import Flask
class TextForm(Form):
    """Form carrying the text to be checked for plagiarism."""
    # Single required free-text field holding the submitted text.
    input_text = StringField('input_text', validators=[DataRequired()])
app = Flask(__name__)
# NOTE(review): hard-coded secret key; acceptable for a lab exercise, but a
# real deployment should load this from configuration or the environment.
app.secret_key="pict"
APP_ROOT = os.path.dirname(os.path.abspath(__file__)) # refers to application_top
# Reference text files live in the application's static directory.
APP_STATIC = os.path.join(APP_ROOT, 'static')
@app.route('/home',methods=['GET'])
def home_post():
    """Render the plagiarism-checker input page."""
    text_form = TextForm(csrf_enabled=False)
    return render_template('home.html', form=text_form)
@app.route('/home',methods=['POST'])
def checker():
    """Compare the submitted text sentence-by-sentence against the
    reference files and flash an (approximate) plagiarism percentage."""
    form=TextForm(csrf_enabled=False)
    text=form.input_text.data
    repeated_sentences=0
    # Split the submission into sentences on '. '.
    text=text.split(". ")
    #text=list(text)
    print text
    files=['file1.txt','file2.txt','file3.txt']
    for x in files:
        print x
        with open(os.path.join(APP_STATIC, x)) as file1:
            file11=file1.read()
            file11=file11.split(". ")
            print file11
            for line1 in file11:
                # Normalize both sides: strip periods/newlines, lowercase.
                line1=line1.replace(".\n","")
                line1=line1.replace(".","")
                line1=line1.replace("\n","")
                line1=line1.lower()
                for line2 in text:
                    line2=line2.replace(".\n","")
                    line2=line2.replace(".","")
                    line2=line2.replace("\n","")
                    line2=line2.lower()
                    if line1==line2:
                        print line2+ " is repeated"
                        # NOTE(review): a sentence is counted once per match
                        # across all files, so the percentage below may
                        # exceed 100% -- confirm whether that is intended.
                        repeated_sentences+=1
    print repeated_sentences
    if repeated_sentences==0:
        flash("Text not plagiarised",category="success")
    else:
        print "Text is plagiarised.\nApproximate plagiarism percentage: ",(float(repeated_sentences)/len(text))*100
        flash_text="Text is plagiarised.\nApproximate plagiarism percentage: "+ str((float(repeated_sentences)/len(text))*100)
        flash(flash_text,category="success")
    return render_template('home.html',form=form)
if __name__ == '__main__':
    # Run the Flask development server (debug: auto-reload + tracebacks).
    app.run(debug = True)
|
rollstudio/djangodash-2013 | bakehouse/wsgi.py | Python | mit | 1,439 | 0.00139 | """
WSGI config for bakehouse project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "bakehouse.settings"
# Falls back to the production settings when the environment is silent.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bakehouse.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
Krakn/learning | src/python/python_koans/python3/another_local_module.py | Python | isc | 280 | 0.010714 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
class Goose:
    """A goose exposing a fixed, read-only name."""
    @property
    def name(self):
        """This goose answers to a single, rather menacing, name."""
        goose_name = "Mr Stabby"
        return goose_name
class Hamster:
    """A hamster exposing a fixed, read-only name."""
    @property
    def name(self):
        """Hamsters keep it simple."""
        hamster_name = "Phil"
        return hamster_name
class _SecretSquirrel:
@property
def name(s | elf):
return "Mr Anonymous" |
pipermerriam/flex | flex/loading/schema/info.py | Python | mit | 933 | 0 | from flex.constants import (
STRING, OBJECT, EMAIL, URI
)
from flex.validation.common import (
generate_object_validator,
)
# Validation schema for the Swagger/OpenAPI ``info`` object: only ``title``
# is mandatory; contact and license are nested objects with format checks
# on email and url fields.
info_schema = {
    'required': [
        'title',
    ],
    'properties': {
        'title': {
            'type': STRING,
        },
        'description': {'type': STRING},
        'termsOfService': {'type': STRING},
        'contact': {
            'type': OBJECT,
            'properties': {
                'name': {'type': STRING},
                'email': {'type': STRING, 'format': EMAIL},
                'url': {'type': STRING, 'format': URI},
            },
        },
        'license': {
            'type': OBJECT,
            'properties': {
                'name': {'type': STRING},
                'url': {'type': STRING, 'format': URI},
            },
        },
        'version': {'type': STRING},
    }
}
# Callable used elsewhere in flex to validate ``info`` mappings.
info_validator = generate_object_validator(
    schema=info_schema,
)
|
skjena/Assemblyx86 | changeOfBase/tester.py | Python | gpl-2.0 | 13,322 | 0.022069 | #this is the base class for tester objects
import sys
import subprocess
import logging
import os
from time import clock
from result import Result
# Child logger for messages aimed at the student; INFO and above are echoed
# to the console through the attached stream handler.
studentLogger = logging.getLogger('errorLogger.progressLogger.studentLogger')
studentLogger.setLevel(logging.INFO)
h = logging.StreamHandler()
h.setLevel(logging.INFO)
studentLogger.addHandler(h)
class Tester(object):
    """
    Tester
    abstract class representing a testing interface
    """
    __name__ = 'Tester' #the name of this class
    # How the program under test receives its input:
    INPUT_STDIN = 1 #input is expected to come via standard input
    INPUT_CMDLINE = 2 #input is expected to come via the command line
    # Where the program under test writes its output:
    OUTPUT_STDOUT = 3 #output of executable will be to the standard output
    OUTPUT_FILE = 4 #exectuables output will be a file
    # Internal status codes returned by _runOne:
    _PROGRAM_COMPLETED = 5 #the program completed the test
    _PROGRAM_CRASHED = 6 #the program crashed during a test
    _PROGRAM_TIMED_OUT = 7 #the program timed out during a test
def str2InputType(typename):
"""converts the string name of the input type to the internal type
@typename: the name of the type. either
stdin
cmdline
"""
if(typename.lower() == 'stdin'):
return Tester.INPUT_STDIN
elif(typename.lower() == 'cmdline'):
return Tester.INPUT_CMDLINE
else:
raise ValueError('Unknown input type ' + typename)
def str2OutputType(typename):
"""converts the string name of the output type to the internal type
@typename: the name of the type. either
stdout
file
"""
if(typename.lower() == 'stdout'):
return Tester.OUTPUT_STDOUT
elif(typename.lower() == 'file'):
return Tester.OUTPUT_FILE
else:
raise ValueError('Unknown input type ' + typename)
    def __init__(self, executable,
                 usingCmdArgs, usingStdin, outputType,
                 inDir, solDir, scratchDir,
                 maxRunTime = 5, cmdArgs = None, lines2skip = 0):
        """
        @executable: the name of the exectuable to be run
        @usingCmdArgs: Are command line arguments being used?
        @usingStdin: Will there be input from the standard input?
        @outputType: how are outputs generated: Either
            OUTPUT_STDOUT: for when the solution is sent to standard out or
            OUTPUT_FILE: for when the solution is sent to a file
        @inDir: the name of the directory containing the inputs to be used for testing
            the naming convention for the tests contained within is testname-test.filetype
        @solDir: the name of the directory containing the solutions
            the naming convention for the solutions contained within is testname-sol.filetype
        @scratchDir: directory to write scratch files in
        @maxRunTime: the maximum number of seconds to run the program or
            None to allow the program to run until completion (if it does not terminate the program will hang)
        @cmdArgs: a list of additional command line arguments to the executable
        @lines2skip: number of lines of output program and solution file to skip
        """
        self.executable = executable
        self.usingCmdArgs = usingCmdArgs
        self.usingStdin = usingStdin
        self.outputType = outputType
        self.inDir = inDir
        self.solDir = solDir
        self.scratchDir = scratchDir
        self.maxRunTime = maxRunTime
        self.lines2skip = lines2skip
        # Copy the caller's list so later mutation does not leak back.
        if cmdArgs == None:
            self.cmdArgs = []
        else:
            self.cmdArgs = cmdArgs.copy()
        self.testFiles = [self.inDir + os.sep +
                          test for test in os.listdir(inDir) if not test.startswith('.')] #get the tests in the test directory
        self.testFiles.sort() #make the tests sorted
        if(scratchDir == None):
            self.userOut = None
        else:
            self.userOut = scratchDir + os.sep + 'userOut.txt' #file to temporarily store the user's output
        self.startTime = 0 #when did the test begin running
        self.endTime = 0 #when did the test end running
        self.results = [] #the results of the testing
def _runOne(self, inFileName, outFileName = None):
"""run self.executable using the inputs contained in inFileName
@inFileName: the name of the file containing the inputs
@outFileName: the name of the file to write the program's stdout to if the solution is contained in the stdout
@returns: the success status of running the program
"""
#determine how to pass input file
infile = None #the input file to be used
additionalArgs = []
with open(inFileName) as infile:
if(self.usingCmdArgs): #using command line arguments
num_args = int(infile.readline()) #the first line contains the number of command line arguments
for i in range(num_args): #read the command arguments in
additionalArgs.append(infile.readline().strip())
#remaining lines in the file are considerd input to be given
#via standard input
#determine how outputs will be generated
outfile = None
if(self.outputType == Tester.OUTPUT_STDOUT): #outputting to stdout
outfile = open(outFileName,'w') #open a file to hold the results
elif(self.outputType == Tester.OUTPUT_FILE): #outputting to a file
raise NotImplementedError #nothing we can really do as of now
else:
raise NotImplementedError
#this clears out python's buffer so that the program run through subprocess
#actually gets input. Another fix if this stops working is to open the file in unbuffered mode
#http://stackoverflow.com/questions/22417010/subprocess-popen-stdin-read-file
infile.seek(infile.tell())
studentLogger.info('Preparing to test %s on %s', self.executable, os.path.basename(inFileName))
#start the clocks
self.endTime = clock()
self.startTime = clock()
#run the program
with subprocess.Popen([self.executable] + self.cmdArgs + additionalArgs,
stdin = infile,
stdout = outfile,
stderr = subprocess.PIPE,
universal_newlines = True) as program:
try:
program.wait(timeout = self.maxRunTime) #wait for the program to finish
self.endTime = clock() #program completed
err = '\t'.join(program.stderr.readlines()) #always have to read the pipes
if(program.returncode != 0):
studentLogger.warning('%s %s crashed for the following reasons:\n\t%s\n',
self.executable, ' '.join(self.cmdArgs), err)
return Tester._PROGRAM_CRASHED
else:
return Test | er._PROGRAM_COMPLETED
except subprocess.TimeoutExpired:
studentLogger.warning('%s %s timed out', ' '.join(self.cmdArgs), self.executable)
program.kill()
return Tester._PROGRAM_TIMED_OUT
#end _runOne
def testOne(self, inFile, solFile):
"""
run the executable using inFile as the inputs
and checking the output against sol | File
@inFile: the name of the file containing the inputs
@solFile: the name of the file containg the solution
@returns: a Result
"""
progStatus = self._runOne(inFile, self.userOut)#run the program
testName = os.path.basename(inFile) #the name of the test
if(progStatus == Tester._PROGRAM_CRASHED):
return Result(testName, False, 'Crashed')
elif(progStatus == Tester._PROGRAM_TIMED_OUT):
return Result(testName, False, 'Timed Out')
else: #program completed successfully
if(self.outputType == Tester.OUTPUT_STDOUT):
with open(self.userOut) as answer:
(correct, out, sol) = self._checkSolution(answer, solFile)
if(correct):
studentLogger.info('%s %s passed test %s',
self.executable, ' '.join(self.cmdArgs),
os.path.basename(inFile))
else:
first_diff = ''
i = 0
for (i,(o,s)) in enumerate(zip(out,sol)):
if o != s:
fir |
rtucker/imap2maildir | simpleimap.py | Python | mit | 17,416 | 0.002354 | """ simpleimap.py, originally from http://p.linode.com/2693 on 2009/07/22
Copyright (c) 2009 Timothy J Fontaine <tjfontaine@gmail.com>
Copyright (c) 2009 Ryan S. Tucker <rtucker@gmail.com>
"""
import email
import imaplib
import logging
import platform
import re
import time
class __simplebase:
    """Mixin adding convenience fetch/search helpers.

    The methods call self.select/search/fetch/uid, so this is expected to
    be combined with an imaplib connection class.
    """
    def parseFetch(self, text):
        """Given a string (e.g. '1 (ENVELOPE...'), breaks it down into
        a useful format: a dict of dicts built from the parsed s-expression.
        Based on Helder Guerreiro <helder@paxjulia.com>'s
        imaplib2 sexp.py: http://code.google.com/p/webpymail/
        """
        literal_re = re.compile(r'^{(\d+)} ')
        simple_re = re.compile(r'^([^ ()]+)')
        quoted_re = re.compile(r'^"((?:[^"\\]|(?:\\\\)|\\"|\\)*)"')
        pos = 0
        length = len(text)
        # NOTE(review): `current` appears to be unused -- candidate removal.
        current = ''
        result = []
        cur_result = result
        level = [ cur_result ]
        # Scanner
        while pos < length:
            # Quoted literal:
            if text[pos] == '"':
                quoted = quoted_re.match(text[pos:])
                if quoted:
                    cur_result.append( quoted.groups()[0] )
                    pos += quoted.end() - 1
            # Numbered literal ({N} prefix followed by N raw characters):
            elif text[pos] == '{':
                lit = literal_re.match(text[pos:])
                if lit:
                    start = pos+lit.end()
                    end = pos+lit.end()+int(lit.groups()[0])
                    pos = end - 1
                    cur_result.append( text[ start:end ] )
            # Simple literal (ints become int, NIL becomes None):
            elif text[pos] not in '() ':
                simple = simple_re.match(text[pos:])
                if simple:
                    tmp = simple.groups()[0]
                    if tmp.isdigit():
                        tmp = int(tmp)
                    elif tmp == 'NIL':
                        tmp = None
                    cur_result.append( tmp )
                    pos += simple.end() - 1
            # Level handling, if we find a '(' we must add another list, if we
            # find a ')' we must return to the previous list.
            elif text[pos] == '(':
                cur_result.append([])
                cur_result = cur_result[-1]
                level.append(cur_result)
            elif text[pos] == ')':
                try:
                    cur_result = level[-2]
                    del level[-1]
                except IndexError:
                    raise ValueError('Unexpected parenthesis at pos %(pos)d text %(text)s' % {'pos':pos, 'text': text})
            pos += 1
        # We now have a list of lists. Dict this a bit...
        outerdict = self.__listdictor(result)
        replydict = {}
        for i in list(outerdict.keys()):
            replydict[i] = self.__listdictor(outerdict[i])
        return replydict
def __listdictor(self, inlist):
""" __listdictor
"""
outdict = {}
for i in range(0,len(inlist),2):
outdict[inlist[i]] = inlist[i+1]
return outdict
    def parseInternalDate(self, resp):
        """Takes IMAP INTERNALDATE and turns it into a Python time
        tuple referenced to GMT.
        Based from: http://code.google.com/p/webpymail/
        """
        Mon2num = {'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4, 'May': 5, 'Jun': 6,
                   'Jul': 7, 'Aug': 8, 'Sep': 9, 'Oct': 10, 'Nov': 11, 'Dec': 12}
        # Matches e.g. ' 9-Apr-2014 09:30:00 +0200'
        InternalDate = re.compile(
            r'(?P<day>[ 0123]?[0-9])-(?P<mon>[A-Z][a-z][a-z])-(?P<year>[0-9][0-9][0-9][0-9])'
            r' (?P<hour>[0-9][0-9]):(?P<min>[0-9][0-9]):(?P<sec>[0-9][0-9])'
            r' (?P<zonen>[-+])(?P<zoneh>[0-9][0-9])(?P<zonem>[0-9][0-9])'
        )
        mo = InternalDate.match(resp)
        if not mo:
            # try email date format (returns None on failure)
            return email.utils.parsedate(resp)
        mon = Mon2num[mo.group('mon')]
        zonen = mo.group('zonen')
        day = int(mo.group('day'))
        year = int(mo.group('year'))
        hour = int(mo.group('hour'))
        min = int(mo.group('min'))
        sec = int(mo.group('sec'))
        zoneh = int(mo.group('zoneh'))
        zonem = int(mo.group('zonem'))
        # zone: the message's UTC offset in seconds.
        zone = (zoneh*60 + zonem)*60
        # handle negative offsets
        if zonen == '-':
            zone = -zone
        tt = (year, mon, day, hour, min, sec, -1, -1, -1)
        utc = time.mktime(tt)
        # Following is necessary because the time module has no 'mkgmtime'.
        # 'mktime' assumes arg in local timezone, so adds timezone/altzone.
        lt = time.localtime(utc)
        if time.daylight and lt[-1]:
            zone = zone + time.altzone
        else:
            zone = zone + time.timezone
        return time.localtime(utc - zone)
def get_messages_by_folder(self, folder, charset=None, search='ALL'):
""" get messages by folder
"""
ids = self.get_ids_by_folder(folder, charset, search)
for m in self.get_messages_by_ids(ids):
yield m
def get_ids_by_folder(self, folder, charset=None, search='ALL'):
""" get ids by folder
"""
self.select(folder, readonly=True)
status, data = self.search(charset, search)
if status != 'OK':
raise Exception('search %s: %s' % (search, data[0]))
return data[0].split()
def get_uids_by_folder(self, folder, charset=None, search='ALL'):
""" get_uids by folders
"""
self.select(folder, readonly=True)
status, data = self.uid('SEARCH', charset, search)
if status != 'OK':
raise Exception('search %s: %s' % (search, data[0]))
return data[0].split()
def get_summaries_by_folder(self, folder, charset=None, search='ALL'):
""" get summaries by folder
"""
for i in self.get_uids_by_folder(folder, charset, search):
yield self.get_summary_by_uid(int(i))
def get_messages_by_ids(self, ids):
""" get messages by ids
"""
for i in ids:
yield self.get_message_by_id(int(i))
def get_message_by_id(self, id):
""" get_message_by_id
"""
status, data = self.fetch(int(id), '(RFC822)')
if status != 'OK':
raise Exception('id %s: %s' % (uid, data[0]))
return email.message_from_string(data[0][1])
def get_messages_by_uids(self, uids):
""" get messages by uids
"""
for i in uids:
yield self.get_message_by_uid(int(i))
def get_message_by_uid(self, uid):
""" get_message_by_uid
"""
status, data = self.uid('FETCH', uid, '(RFC822)')
if status != 'OK':
raise Exception('uid %s: %s' % (uid, data[0]))
return email.message_from_string(data[0][1])
def get_summaries_by_ids(self, ids):
""" get summaries by ids
"""
for i in ids:
yield self.get_summary_by_id(int(i))
def get_summary_by_id(self, id):
"""Retrieve a dictionary of simple header information for a given id.
Requires: id (Sequence number of message)
Returns: {'uid': UID you requested,
'msgid': RFC822 Message ID,
'size': Size of message in bytes,
'date': IMAP's Internaldate for the message,
'envelope': Envelope data}
"""
# Retrieve the message from the server.
status, data = self.fetch(id, '(UID ENVELOPE RFC822.SIZE INTERNALDATE)')
if status != 'OK':
return None
return self.parse_summary_data(data)
def get_uids_by_ids(self, ids):
""" get uids by ids
"""
for i in ids:
yield self.get_uid_by_id(int(i))
def get_uid_by_id(self, id):
"""Given a message number (id), returns the UID if it exists."""
status, data = self.fetch(int(id), '(UID)')
if status != 'OK':
raise Exception('id %s: %s' % (id, data[0]))
if data[0]:
uidrg = re.compile('.*?UID\\s+(\\d+)',re.IGNORECASE|re.DOTALL)
|
Flowdalic/bitcoin | test/functional/p2p_fingerprint.py | Python | mit | 5,771 | 0.00104 | #!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test various fingerprinting protections.
If a stale block more than a month old or its header are requested by a peer,
the node should pretend that it does not have it to avoid fingerprinting.
"""
import time
from test_framework.blocktools import (create_block, create_coinbase)
from test_framework.messages import CInv
from test_framework.mininode import (
P2PInterface,
msg_headers,
msg_block,
msg_getdata,
msg_getheaders,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
wait_until,
)
class P2PFingerprintTest(BitcoinTestFramework):
    """Check that a node withholds stale blocks/headers older than a month.

    A peer that could fetch arbitrary stale blocks would be able to
    fingerprint the node; recent stale blocks and old *active-chain*
    blocks must still be served.
    """
    def set_test_params(self):
        """Single node, starting from an empty (clean) chain."""
        self.setup_clean_chain = True
        self.num_nodes = 1
    # Build a chain of blocks on top of given one
    def build_chain(self, nblocks, prev_hash, prev_height, prev_median_time):
        """Return *nblocks* solved blocks chained on top of *prev_hash*."""
        blocks = []
        for _ in range(nblocks):
            coinbase = create_coinbase(prev_height + 1)
            # Each block is timestamped one second past the previous
            # median time so it is always valid.
            block_time = prev_median_time + 1
            block = create_block(int(prev_hash, 16), coinbase, block_time)
            block.solve()
            blocks.append(block)
            prev_hash = block.hash
            prev_height += 1
            prev_median_time = block_time
        return blocks
    # Send a getdata request for a given block hash
    def send_block_request(self, block_hash, node):
        """Ask *node* for the full block with *block_hash* via getdata."""
        msg = msg_getdata()
        msg.inv.append(CInv(2, block_hash)) # 2 == "Block"
        node.send_message(msg)
    # Send a getheaders request for a given single block hash
    def send_header_request(self, block_hash, node):
        """Ask *node* for the single header ending at *block_hash*."""
        msg = msg_getheaders()
        msg.hashstop = block_hash
        node.send_message(msg)
    # Check whether last block received from node has a given hash
    def last_block_equals(self, expected_hash, node):
        """True if the most recent 'block' message carries *expected_hash*."""
        block_msg = node.last_message.get("block")
        return block_msg and block_msg.block.rehash() == expected_hash
    # Check whether last block header received from node has a given hash
    def last_header_equals(self, expected_hash, node):
        """True if the most recent 'headers' message starts with *expected_hash*."""
        headers_msg = node.last_message.get("headers")
        return (headers_msg and
                headers_msg.headers and
                headers_msg.headers[0].rehash() == expected_hash)
    # Checks that stale blocks timestamped more than a month ago are not served
    # by the node while recent stale blocks and old active chain blocks are.
    # This does not currently test that stale blocks timestamped within the
    # last month but that have over a month's worth of work are also withheld.
    def run_test(self):
        """Reorg away from a short chain, then probe serving of its blocks."""
        node0 = self.nodes[0].add_p2p_connection(P2PInterface())
        # Set node time to 60 days ago
        self.nodes[0].setmocktime(int(time.time()) - 60 * 24 * 60 * 60)
        # Generating a chain of 10 blocks
        block_hashes = self.nodes[0].generate(nblocks=10)
        # Create longer chain starting 2 blocks before current tip
        height = len(block_hashes) - 2
        block_hash = block_hashes[height - 1]
        block_time = self.nodes[0].getblockheader(block_hash)["mediantime"] + 1
        new_blocks = self.build_chain(5, block_hash, height, block_time)
        # Force reorg to a longer chain
        node0.send_message(msg_headers(new_blocks))
        node0.wait_for_getdata()
        for block in new_blocks:
            node0.send_and_ping(msg_block(block))
        # Check that reorg succeeded
        assert_equal(self.nodes[0].getblockcount(), 13)
        stale_hash = int(block_hashes[-1], 16)
        # Check that getdata request for stale block succeeds
        self.send_block_request(stale_hash, node0)
        test_function = lambda: self.last_block_equals(stale_hash, node0)
        wait_until(test_function, timeout=3)
        # Check that getheader request for stale block header succeeds
        self.send_header_request(stale_hash, node0)
        test_function = lambda: self.last_header_equals(stale_hash, node0)
        wait_until(test_function, timeout=3)
        # Longest chain is extended so stale is much older than chain tip
        self.nodes[0].setmocktime(0)
        tip = self.nodes[0].generate(nblocks=1)[0]
        assert_equal(self.nodes[0].getblockcount(), 14)
        # Send getdata & getheaders to refresh last received getheader message
        block_hash = int(tip, 16)
        self.send_block_request(block_hash, node0)
        self.send_header_request(block_hash, node0)
        node0.sync_with_ping()
        # Request for very old stale block should now fail
        self.send_block_request(stale_hash, node0)
        time.sleep(3)
        assert not self.last_block_equals(stale_hash, node0)
        # Request for very old stale block header should now fail
        self.send_header_request(stale_hash, node0)
        time.sleep(3)
        assert not self.last_header_equals(stale_hash, node0)
        # Verify we can fetch very old blocks and headers on the active chain
        block_hash = int(block_hashes[2], 16)
        self.send_block_request(block_hash, node0)
        self.send_header_request(block_hash, node0)
        node0.sync_with_ping()
        self.send_block_request(block_hash, node0)
        test_function = lambda: self.last_block_equals(block_hash, node0)
        wait_until(test_function, timeout=3)
        self.send_header_request(block_hash, node0)
        test_function = lambda: self.last_header_equals(block_hash, node0)
        wait_until(test_function, timeout=3)
# Standard functional-test entry point: run through the framework harness.
if __name__ == '__main__':
    P2PFingerprintTest().main()
|
dionyziz/thefuck | thefuck/rules/no_command.py | Python | mit | 848 | 0 | from difflib import get_close_matches
import os
from pathlib import Path
def _safe(fn, fallback):
try:
return fn()
except OSError:
return fallback
def _get_all_bins():
return [exe.name
for path in os.environ.get('PATH', '').split(':')
for exe in _safe(lambda: list(Path(path).iterdir()), [])
if not _safe(exe.is_dir, True)]
def match(command, settings):
    """True when the shell said 'not found' and a similarly named
    executable exists on $PATH."""
    if 'not found' not in command.stderr:
        return False
    attempted = command.script.split(' ')[0]
    return bool(get_close_matches(attempted, _get_all_bins()))
def get_new_command(command, settings):
    """Rebuild the command line with the closest matching executable name."""
    tokens = command.script.split(' ')
    best_match = get_close_matches(tokens[0], _get_all_bins())[0]
    return ' '.join([best_match] + tokens[1:])
|
StefanBruens/libsigrokdecode | decoders/cec/pd.py | Python | gpl-3.0 | 11,367 | 0.004487 | ##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2018 Jorge Solla Rubiales <jorgesolla@gmail.com>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
import sigrokdecode as srd
from .protocoldata import *
# Pulse types
class Pulse:
    """Symbolic constants for the pulse classes seen on the CEC line."""
    INVALID = 0
    START = 1
    ZERO = 2
    ONE = 3
# Protocol stats
class Stat:
    """States of the CEC frame-decoding state machine."""
    WAIT_START = 0
    GET_BITS = 1
    WAIT_EOM = 2
    WAIT_ACK = 3
# Pulse times in milliseconds.
# For each pulse class, 'low' bounds the duration of the low (pulled-down)
# phase and 'total' bounds the full pulse period; process() classifies a
# pulse by checking the measured times against these windows.
timing = {
    Pulse.START: {
        'low': { 'min': 3.5, 'max': 3.9 },
        'total': { 'min': 4.3, 'max': 4.7 }
    },
    Pulse.ZERO: {
        'low': { 'min': 1.3, 'max': 1.7 },
        'total': { 'min': 2.05, 'max': 2.75 }
    },
    Pulse.ONE: {
        'low': { 'min': 0.4, 'max': 0.8 },
        'total': { 'min': 2.05, 'max': 2.75 }
    }
}
class ChannelError(Exception):
    """Raised when a required input channel is unusable."""
class Decoder(srd.Decoder):
    """libsigrokdecode protocol decoder for the HDMI-CEC single-wire bus."""
    api_version = 3
    id = 'cec'
    name = 'CEC'
    longname = 'HDMI-CEC'
    desc = 'HDMI Consumer Electronics Control (CEC) protocol.'
    license = 'gplv2+'
    inputs = ['logic']
    outputs = []
    tags = ['Display', 'PC']
    channels = (
        {'id': 'cec', 'name': 'CEC', 'desc': 'CEC bus data'},
    )
    # Annotation classes; their tuple index is the class id passed to
    # self.put() throughout the decoder.
    annotations = (
        ('st', 'Start'),
        ('eom-0', 'End of message'),
        ('eom-1', 'Message continued'),
        ('nack', 'ACK not set'),
        ('ack', 'ACK set'),
        ('bits', 'Bits'),
        ('bytes', 'Bytes'),
        ('frames', 'Frames'),
        ('sections', 'Sections'),
        ('warnings', 'Warnings')
    )
    # GUI rows, each grouping one or more annotation class ids from above.
    annotation_rows = (
        ('bits', 'Bits', (0, 1, 2, 3, 4, 5)),
        ('bytes', 'Bytes', (6,)),
        ('frames', 'Frames', (7,)),
        ('sections', 'Sections', (8,)),
        ('warnings', 'Warnings', (9,))
    )
    def __init__(self):
        """Initialize all decoder state via reset()."""
        self.reset()
    def precalculate(self):
        """Derive sample-count limits that depend on the samplerate."""
        # Restrict max length of ACK/NACK labels to 2 BIT pulses.
        bit_time = timing[Pulse.ZERO]['total']['min'] * 2
        self.max_ack_len_samples = round((bit_time / 1000) * self.samplerate)
    def reset(self):
        """Reset decoder state (called at construction and on stream reset)."""
        self.stat = Stat.WAIT_START
        self.samplerate = None
        # Sample numbers of the current pulse's edges.
        self.fall_start = None
        self.fall_end = None
        self.rise = None
        self.reset_frame_vars()
    def reset_frame_vars(self):
        """Clear the per-frame accumulators before the next frame starts."""
        self.eom = None
        self.bit_count = 0
        self.byte_count = 0
        self.byte = 0
        self.byte_start = None
        self.frame_start = None
        self.frame_end = None
        self.is_nack = 0
        # Decoded bytes of the current command, as dicts with a 'val' key.
        self.cmd_bytes = []
    def metadata(self, key, value):
        """sigrok metadata callback; capture the samplerate when provided."""
        if key == srd.SRD_CONF_SAMPLERATE:
            self.samplerate = value
            self.precalculate()
    def handle_frame(self, is_nack):
        """Emit the 'frames' and 'sections' annotations for a finished frame.

        is_nack: truthy when the destination did not acknowledge the frame.
        """
        if self.fall_start is None or self.fall_end is None:
            return
        i = 0
        string = ''
        # Frames row: the raw bytes as colon-separated hex, e.g. "40:82:10".
        while i < len(self.cmd_bytes):
            string += '{:02x}'.format(self.cmd_bytes[i]['val'])
            if i != (len(self.cmd_bytes) - 1):
                string += ':'
            i += 1
        self.put(self.frame_start, self.frame_end, self.out_ann, [7, [string]])
        i = 0
        operands = 0
        string = ''
        # Sections row: decoded header, opcode and operand list.
        while i < len(self.cmd_bytes):
            if i == 0: # Parse header
                (src, dst) = decode_header(self.cmd_bytes[i]['val'])
                string = 'HDR: ' + src + ', ' + dst
            elif i == 1: # Parse opcode
                string += ' | OPC: ' + opcodes.get(self.cmd_bytes[i]['val'], 'Invalid')
            else: # Parse operands
                if operands == 0:
                    string += ' | OPS: '
                operands += 1
                string += '0x{:02x}'.format(self.cmd_bytes[i]['val'])
                if i != len(self.cmd_bytes) - 1:
                    string += ', '
            i += 1
        # Header only commands are PINGS
        if i == 1:
            string += ' | OPC: PING' if self.eom else ' | OPC: NONE. Aborted cmd'
        # Add extra information (ack of the command from the destination)
        string += ' | R: NACK' if is_nack else ' | R: ACK'
        self.put(self.frame_start, self.frame_end, self.out_ann, [8, [string]])
def process(self):
zero_time = ((self.rise - self.fall_start) / self.samplerate) * 1000.0
total_time = ((self.fall_end - self.fall_start) / self.samplerate) * 1000.0
pulse = Pulse.INVALID
# VALIDATION: Identify pulse based on length of the low period
for key in timing:
if zero_time >= timing[key]['low']['min'] and zero_time <= timing[key]['low']['max']:
pulse = key
break
# VALIDATION: Invalid pulse
if pulse == Pulse.INVALID:
self.stat = Stat.WAIT_START
self.put(self.fall_start, self.fall_end, self.out_ann, [9, ['Invalid pulse: Wrong timing']])
return
# VALIDATION: If waiting for start, discard everything else
if self.stat == Stat.WAIT_START and pulse != Pulse.START:
self.put(self.fall_start, self.fall_end, self.out_ann, [9, ['Expected START: BIT found']])
return
# VALIDATION: If waiting for ACK or EOM, only BIT pulses (0/1) are expected
if (self.stat == Stat.WAIT_ACK or self.stat == Stat.WAIT_EOM) and pulse == Pulse.START:
self.put(self.fall_start, self.fall_end, self.out_ann, [9, ['Expected BIT: START received)']])
self.stat = Stat.WAIT_START
# VALIDATION: ACK bit pulse remains high till the next frame (if any): Validate only min time of the low period
if self.stat == Stat.WAIT_ACK and pulse != Pulse.START:
if total_time < timing[pulse]['total']['min']:
pulse = Pulse.INVALID
self.put(self.fall_start, self.fall_end, self.out_ann, [9, ['ACK pulse below minimun time']])
self.stat = Stat.WAIT_START
return
# VALIDATION / PING FRAME DETECTION: Initiator doesn't sets the EOM = 1 but stops sending when ack doesn't arrive
if self.stat == Stat.GET_BITS and pulse == Pulse.START:
# Make sure we received a complete byte to consider it a valid ping
if self.bit_count == 0:
self.handle_frame(self.is_nack)
else:
self.put(self.frame_start, self.samplenum, self.out_ann, [9, ['ERROR: Incomplete byte received']])
# Set wait start so we receive next frame
self.stat = Stat.WAIT_START
# VALIDATION: Check timing of the BIT (0/1) pulse in any other case (not waiting for ACK)
if self.stat != Stat.WAIT_ACK and pulse != Pulse.START:
if total_time < timing[pulse]['total']['min'] or total_time > timing[pulse]['total']['max']:
self.put(self.fall_start, self.fall_end, self.out_ann, [9, ['Bit pulse exceeds total pulse timespan']])
pulse = Pulse.INVALID
self.stat = Stat.WAIT_START
return
if pulse == Pulse.ZERO:
bit = 0
elif pulse == Pulse.ONE:
bit = 1
# STATE: WAIT START
if self.stat == Stat.WAIT_START:
self.stat = Stat.GET_BITS
self.reset_frame_vars()
self.put(self.fall_start, self.fall_end, self.out_ann, [0, ['ST']])
# STATE: GET BITS
elif self.stat == Stat.GET_BITS:
# Reset stats on first bit
if self.bit_count == 0:
self.byte_start = self.fall_start
self.byte = 0
# If 1st byte of the datagram save its sample num
if len( |
andim/scipydirect | doc/tutorialfig.py | Python | mit | 947 | 0.006336 | from __future__ import division
from scipydirect import minimize
import numpy as np
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
from matplotlib import cm
import os
def obj(x):
    """Six-hump camelback test function of two variables x = (x1, x2)."""
    x1, x2 = x[0], x[1]
    x1_sq = x1 * x1
    x2_sq = x2 * x2
    return ((4 - 2.1 * x1_sq + x1_sq * x1_sq / 3.0) * x1_sq
            + x1 * x2
            + (-4 + 4 * x2_sq) * x2_sq)
# Search domain of the six-hump camelback function.
bounds = [(-3, 3), (-2, 2)]
# Run the DIRECT global optimizer; res.x is the best point found and
# res.fun its objective value.
res = minimize(obj, bounds)
#
# Plot the results.
#
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x = res.x
# 50x50 evaluation grid spanning +/-1 around the optimum in each axis.
X, Y = np.mgrid[x[0]-1:x[0]+1:50j, x[1]-1:x[1]+1:50j]
Z = np.zeros_like(X)
for i in range(X.size):
    # NOTE(review): X.flatten()/Y.flatten() copy the whole grid on every
    # iteration; X.flat[i]/Y.flat[i] would avoid that (same values).
    Z.ravel()[i] = obj([X.flatten()[i], Y.flatten()[i]])
ax.plot_wireframe(X, Y, Z, rstride=1, cstride=1, cmap=cm.jet)
# Mark the optimum found by the solver.
ax.scatter(x[0], x[1], res.fun, c='r', marker='o')
ax.set_title('Six-hump Camelback Function')
ax.view_init(30, 45)
fig.tight_layout()
plt.show()
|
wilbertom/fileconversions | tests/test_text_to_pdf_conversion.py | Python | mit | 295 | 0.00339 | from fileconversions.testing import ConversionTestCase
from fileconver | sions.conversions import TextToPdf
class TestTextToPdfConversion(ConversionTestCase):
    """Sanity checks for the TextToPdf conversion backend."""
    # Conversion class under test; ConversionTestCase instantiates it.
    conversion_cls = TextToPdf
    def test_command_is_installed(self):
        """The external command the conversion shells out to must exist."""
        self.assertCommandExists(self.conversion.command_name)
|
home-assistant/home-assistant | homeassistant/util/color.py | Python | apache-2.0 | 23,863 | 0.000377 | """Color util methods."""
from __future__ import annotations
import colorsys
import math
from typing import NamedTuple
import attr
# mypy: disallow-any-generics
class RGBColor(NamedTuple):
    """An RGB color as three integer channels (0-255, per the COLORS table)."""
    r: int
    g: int
    b: int
# Official CSS3 colors from w3.org:
# https://www.w3.org/TR/2010/PR-css3-color-20101028/#html4
# names do not have spaces in them so that we can compare against
# requests more easily (by removing spaces from the requests as well).
# This lets "dark seagreen" and "dark sea green" both match the same
# color "darkseagreen".
COLORS = {
"aliceblue": RGBColor(240, 248, 255),
"antiquewhite": RGBColor(250, 235, 215),
"aqua": RGBColor(0, 255, 255),
"aquamarine": RGBColor(127, 255, 212),
"azure": RGBColor(240, 255, 255),
"beige": RGBColor(245, 245, 220),
"bisque": RGBColor(255, 228, 196),
"black": RGBColor(0, 0, 0),
"blanchedalmond": RGBColor(255, 235, 205),
"blue": RGBColor(0, 0, 255),
"blueviolet": RGBColor(138, 43, 226),
"brown": RGBColor(165, 42, 42),
"burlywood": RGBColor(222, 184, 135),
"cadetblue": RGBColor(95, 158, 160),
"chartreuse": RGBColor(127, 255, 0),
"chocolate": RGBColor(210, 105, 30),
"coral": RGBColor(255, 127, 80),
"cornflowerblue": RGBColor(100, 149, 237),
"cornsilk": RGBColor(255, 248, 220),
"crimson": RGBColor(220, 20, 60),
"cyan": RGBColor(0, 255, 255),
"darkblue": RGBColor(0, 0, 139),
"darkcyan": RGBColor(0, 139, 139),
"darkgoldenrod": RGBColor(184, 134, 11),
"darkgray": RGBColor(169, 169, 169),
"darkgreen": RGBColor(0, 100, 0),
"darkgrey": RGBColor(169, 169, 169),
"darkkhaki": RGBColor(189, 183, 107),
"darkmagenta": RGBColor(139, 0, 139),
"darkolivegreen": RGBColor(85, 107, 47),
"darkorange": RGBColor(255, 140, 0),
"darkorchid": RGBColor(153, 50, 204),
"darkred": RGBColor(139, 0, 0),
"darksalmon": RGBColor(233, 150, 122),
"darkseagreen": RGBColor(143, 188, 143),
"darkslateblue": RGBColor(72, 61, 139),
"darkslategray": RGBColor(47, 79, 79),
"darkslategrey": RGBColor(47, 79, 79),
"darkturquoise": RGBColor(0, 206, 209),
"darkviolet": RGBColor(148, 0, 211),
"deeppink": RGBColor(255, 20, 147),
"deepskyblue": RGBColor(0, 191, 255),
"dimgray": RGBColor(105, 105, 105),
"dimgrey": RGBColor(105, 105, 105),
"dodgerblue": RGBColor(30, 144, 255),
"firebrick": RGBColor(178, 34, 34),
"floralwhite": RGBColor(255, 2 | 50, 240),
"forestgreen": RGBColor(34, 139, 34),
"fuchsia": RGBColor(255, 0, 255),
"gainsboro": RGBColor(220, 220, 220),
"ghostwhite": RGBColor(248, 248, 255),
"gold": RGBColor(255, 215, 0),
"goldenrod": RGBColor(218, 165, 32),
"gray": RGBColor(128, 128, 128),
"green": RGBColor(0, 128, 0),
"greenyellow": RGBColor(173, 255, 47),
| "grey": RGBColor(128, 128, 128),
"honeydew": RGBColor(240, 255, 240),
"hotpink": RGBColor(255, 105, 180),
"indianred": RGBColor(205, 92, 92),
"indigo": RGBColor(75, 0, 130),
"ivory": RGBColor(255, 255, 240),
"khaki": RGBColor(240, 230, 140),
"lavender": RGBColor(230, 230, 250),
"lavenderblush": RGBColor(255, 240, 245),
"lawngreen": RGBColor(124, 252, 0),
"lemonchiffon": RGBColor(255, 250, 205),
"lightblue": RGBColor(173, 216, 230),
"lightcoral": RGBColor(240, 128, 128),
"lightcyan": RGBColor(224, 255, 255),
"lightgoldenrodyellow": RGBColor(250, 250, 210),
"lightgray": RGBColor(211, 211, 211),
"lightgreen": RGBColor(144, 238, 144),
"lightgrey": RGBColor(211, 211, 211),
"lightpink": RGBColor(255, 182, 193),
"lightsalmon": RGBColor(255, 160, 122),
"lightseagreen": RGBColor(32, 178, 170),
"lightskyblue": RGBColor(135, 206, 250),
"lightslategray": RGBColor(119, 136, 153),
"lightslategrey": RGBColor(119, 136, 153),
"lightsteelblue": RGBColor(176, 196, 222),
"lightyellow": RGBColor(255, 255, 224),
"lime": RGBColor(0, 255, 0),
"limegreen": RGBColor(50, 205, 50),
"linen": RGBColor(250, 240, 230),
"magenta": RGBColor(255, 0, 255),
"maroon": RGBColor(128, 0, 0),
"mediumaquamarine": RGBColor(102, 205, 170),
"mediumblue": RGBColor(0, 0, 205),
"mediumorchid": RGBColor(186, 85, 211),
"mediumpurple": RGBColor(147, 112, 219),
"mediumseagreen": RGBColor(60, 179, 113),
"mediumslateblue": RGBColor(123, 104, 238),
"mediumspringgreen": RGBColor(0, 250, 154),
"mediumturquoise": RGBColor(72, 209, 204),
"mediumvioletred": RGBColor(199, 21, 133),
"midnightblue": RGBColor(25, 25, 112),
"mintcream": RGBColor(245, 255, 250),
"mistyrose": RGBColor(255, 228, 225),
"moccasin": RGBColor(255, 228, 181),
"navajowhite": RGBColor(255, 222, 173),
"navy": RGBColor(0, 0, 128),
"navyblue": RGBColor(0, 0, 128),
"oldlace": RGBColor(253, 245, 230),
"olive": RGBColor(128, 128, 0),
"olivedrab": RGBColor(107, 142, 35),
"orange": RGBColor(255, 165, 0),
"orangered": RGBColor(255, 69, 0),
"orchid": RGBColor(218, 112, 214),
"palegoldenrod": RGBColor(238, 232, 170),
"palegreen": RGBColor(152, 251, 152),
"paleturquoise": RGBColor(175, 238, 238),
"palevioletred": RGBColor(219, 112, 147),
"papayawhip": RGBColor(255, 239, 213),
"peachpuff": RGBColor(255, 218, 185),
"peru": RGBColor(205, 133, 63),
"pink": RGBColor(255, 192, 203),
"plum": RGBColor(221, 160, 221),
"powderblue": RGBColor(176, 224, 230),
"purple": RGBColor(128, 0, 128),
"red": RGBColor(255, 0, 0),
"rosybrown": RGBColor(188, 143, 143),
"royalblue": RGBColor(65, 105, 225),
"saddlebrown": RGBColor(139, 69, 19),
"salmon": RGBColor(250, 128, 114),
"sandybrown": RGBColor(244, 164, 96),
"seagreen": RGBColor(46, 139, 87),
"seashell": RGBColor(255, 245, 238),
"sienna": RGBColor(160, 82, 45),
"silver": RGBColor(192, 192, 192),
"skyblue": RGBColor(135, 206, 235),
"slateblue": RGBColor(106, 90, 205),
"slategray": RGBColor(112, 128, 144),
"slategrey": RGBColor(112, 128, 144),
"snow": RGBColor(255, 250, 250),
"springgreen": RGBColor(0, 255, 127),
"steelblue": RGBColor(70, 130, 180),
"tan": RGBColor(210, 180, 140),
"teal": RGBColor(0, 128, 128),
"thistle": RGBColor(216, 191, 216),
"tomato": RGBColor(255, 99, 71),
"turquoise": RGBColor(64, 224, 208),
"violet": RGBColor(238, 130, 238),
"wheat": RGBColor(245, 222, 179),
"white": RGBColor(255, 255, 255),
"whitesmoke": RGBColor(245, 245, 245),
"yellow": RGBColor(255, 255, 0),
"yellowgreen": RGBColor(154, 205, 50),
# And...
"homeassistant": RGBColor(3, 169, 244),
}
@attr.s()
class XYPoint:
    """Represents a CIE 1931 XY coordinate pair.

    x and y are chromaticity coordinates (presumably in 0..1 -- not
    validated here).
    """
    x: float = attr.ib()  # pylint: disable=invalid-name
    y: float = attr.ib()  # pylint: disable=invalid-name
@attr.s()
class GamutType:
    """Represents the Gamut of a light.

    The three XYPoints are the red/green/blue vertices of the device's
    color triangle in CIE xy space.
    """
    # ColorGamut = gamut(xypoint(xR,yR),xypoint(xG,yG),xypoint(xB,yB))
    red: XYPoint = attr.ib()
    green: XYPoint = attr.ib()
    blue: XYPoint = attr.ib()
def color_name_to_rgb(color_name: str) -> RGBColor:
    """Look up a CSS3 color name and return its RGBColor.

    Matching ignores spaces and case, so "dark sea green" and
    "darkseagreen" resolve identically. Raises ValueError for
    unknown names.
    """
    normalized = color_name.replace(" ", "").lower()
    rgb = COLORS.get(normalized)
    if not rgb:
        raise ValueError("Unknown color")
    return rgb
# pylint: disable=invalid-name
def color_RGB_to_xy(
    iR: int, iG: int, iB: int, Gamut: GamutType | None = None
) -> tuple[float, float]:
    """Convert an RGB color to CIE 1931 xy chromaticity (brightness dropped)."""
    x, y, _brightness = color_RGB_to_xy_brightness(iR, iG, iB, Gamut)
    return x, y
# Taken from:
# https://github.com/PhilipsHue/PhilipsHueSDK-iOS-OSX/blob/00187a3/ApplicationDesignNotes/RGB%20to%20xy%20Color%20conversion.md
# License: Code is given as is. Use at your own risk and discretion.
def color_RGB_to_xy_brightness(
iR: int, iG: int, iB: int, Gamut: GamutType | None = None
) -> tuple[float, float, int]:
"""Convert from RGB color to XY color."""
if iR + iG + iB == 0:
return 0.0, 0 |
luci/luci-py | appengine/third_party/python-adb/adb/fastboot_debug.py | Python | apache-2.0 | 1,879 | 0.005322 | #!/usr/bin/env python
# Copyright 2014 Google Inc. All rights reserved.
# |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in co | mpliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fastboot debugging binary.
Call it similar to how you call android's fastboot. Call it similar to how you
call android's fastboot, but this only accepts usb paths and no serials.
"""
from __future__ import absolute_import
import sys
import gflags
import progressbar
from . import common_cli
from . import fastboot
gflags.ADOPT_module_key_flags(common_cli)
FLAGS = gflags.FLAGS
def KwargHandler(kwargs, argspec):
  """Fill *kwargs* with the optional callbacks the target method accepts.

  Args:
    kwargs: dict of keyword arguments being assembled for the command.
    argspec: argspec of the command; a callback is only added when its
      name appears in argspec.args.
  """
  if 'info_cb' in argspec.args:
    # Use an unbuffered version of stdout.
    def InfoCb(message):
      if not message.message:
        return
      sys.stdout.write('%s: %s\n' % (message.header, message.message))
      sys.stdout.flush()
    kwargs['info_cb'] = InfoCb
  if 'progress_callback' in argspec.args:
    # BUG FIX: this was progressbar.ProgessBar (misspelled), which raised
    # AttributeError as soon as a command supporting progress ran.
    bar = progressbar.ProgressBar(
        widgets=[progressbar.Bar(), progressbar.Percentage()])
    bar.start()
    def SetProgress(current, total):
      # Multiply before dividing so the percentage is also correct under
      # Python 2's integer division (current / total would truncate to 0).
      bar.update(current * 100.0 / total)
      if current == total:
        bar.finish()
    kwargs['progress_callback'] = SetProgress
def main(argv):
  """CLI entry point: dispatch argv to FastbootCommands via common_cli."""
  common_cli.StartCli(
      argv, fastboot.FastbootCommands.ConnectDevice,
      list_callback=fastboot.FastbootCommands.Devices,
      kwarg_callback=KwargHandler)
# FLAGS(...) parses gflags out of argv and returns the remaining arguments.
if __name__ == '__main__':
  main(FLAGS(sys.argv))
|
FePhyFoFum/quartetsampling | pysrc/phylo/tree_reader.py | Python | gpl-3.0 | 3,393 | 0 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fileencoding=utf-8
"""
this takes a newick string as instr
and reads the string and makes the
nodes and returns the root node
http://www.github.com/FePhyFoFum/quartetsampling
This file is part of 'quartetsampling'.
'quartetsampling' is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
'quartetsampling' is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with 'quartetsampling'. If not, see <http://www.gnu.org/licenses/>.
"""
from .node import Node
def read_tree_string(instr):
    """Parse a newick tree string and return the root Node.

    Handles internal/external node labels and ':'-prefixed branch
    lengths; parsing stops at the terminating ';'. Square brackets,
    commas, parentheses, colons and semicolons delimit labels.

    BUG FIX: the original compared characters with 'is'/'is not'
    (identity), which only works by CPython's small-string interning
    and is a SyntaxWarning on modern interpreters; all comparisons now
    use equality / membership tests.
    """
    label_delimiters = ',):;['
    root = None
    index = 0
    nextchar = instr[index]
    start = True
    keepgoing = True
    curnode = None
    while keepgoing:
        if nextchar == "(":
            if start:
                # First '(' creates the root; later ones open child clades.
                root = Node()
                curnode = root
                start = False
            else:
                newnode = Node()
                curnode.add_child(newnode)
                curnode = newnode
        elif nextchar == ',':
            curnode = curnode.parent
        elif nextchar == ")":
            # Clade closed; an optional internal-node label may follow.
            curnode = curnode.parent
            index += 1
            nextchar = instr[index]
            name = ""
            while nextchar not in label_delimiters:
                name += nextchar
                index += 1
                nextchar = instr[index]
            curnode.label = name
            index -= 1
        elif nextchar == ';':
            keepgoing = False
            break
        elif nextchar == ":":
            # Branch length for the current node.
            index += 1
            nextchar = instr[index]
            brlen = ""
            while nextchar not in label_delimiters:
                brlen += nextchar
                index += 1
                nextchar = instr[index]
            curnode.length = float(brlen)
            index -= 1
        elif nextchar == ' ':
            index += 1
            nextchar = instr[index]
        else:  # this is an external named node (a tip)
            newnode = Node()
            curnode.add_child(newnode)
            curnode = newnode
            curnode.istip = True
            name = ""
            while nextchar not in label_delimiters:
                name += nextchar
                index += 1
                nextchar = instr[index]
            curnode.label = name
            index -= 1
        if index < len(instr) - 1:
            index += 1
            nextchar = instr[index]
    return root
if __name__ == "__main__":
treestring = "(a:3,(b:1e-05,c:1.3)int_|_and_33.5:5)root;"
xnode = read_tree_string(treestring)
print(xnode.get_newick_repr(True))
|
martakus/advent-of-code | y2017/tests/test5.py | Python | mit | 274 | 0 | import unittest |
from y2017.day5 import *
class TestDay5(unittest.TestCase):
    """Advent of Code 2017 day 5 sample input from the puzzle statement."""
    def test_part_A(self):
        """Plain jump offsets: the sample exits after 5 steps."""
        self.assertEqual(jump_increment('0\n3\n0\n1\n-3\n'), 5)
    def test_part_B(self):
        """Conditional (+1/-1) offsets: the sample exits after 10 steps."""
        self.assertEqual(jump_conditional_increment('0\n3\n0\n1\n-3\n'), 10)
|
bryteise/ister | vm-installation-image-post-update-version.py | Python | gpl-3.0 | 3,122 | 0.001281 | #!/usr/bin/python3
import os
import sys
# JSON fragment substituted for the template's Version field.
INSTALLER_VERSION = '"latest"'


def create_installer_config(path):
    """Write /etc/ister.conf and /etc/ister.json under *path*.

    ister.conf points the installer at the embedded template;
    ister.json is the basic whole-disk installation template with the
    Version field rewritten to INSTALLER_VERSION.
    """
    config = u"template=file:///etc/ister.json\n"
    jconfig = u'{"DestinationType" : "physical", "PartitionLayout" : \
[{"disk" : "vda", "partition" : 1, "size" : "512M", "type" : "EFI"}, \
{"disk" : "vda", "partition" : 2, \
"size" : "512M", "type" : "swap"}, {"disk" : "vda", "partition" : 3, \
"size" : "rest", "type" : "linux"}], \
"FilesystemTypes" : \
[{"disk" : "vda", "partition" : 1, "type" : "vfat"}, \
{"disk" : "vda", "partition" : 2, "type" : "swap"}, \
{"disk" : "vda", "partition" : 3, "type" : "ext4"}], \
"PartitionMountPoints" : \
[{"disk" : "vda", "partition" : 1, "mount" : "/boot"}, \
{"disk" : "vda", "partition" : 3, "mount" : "/"}], \
"Version" : 0, "Bundles" : ["kernel-native", "telemetrics", "os-core", "os-core-update"]}\n'
    etc_dir = "{}/etc".format(path)
    if not os.path.isdir(etc_dir):
        os.mkdir(etc_dir)
    with open(etc_dir + "/ister.conf", "w") as conf_file:
        conf_file.write(config)
    json_text = jconfig.replace('"Version" : 0',
                                '"Version" : ' + INSTALLER_VERSION)
    with open(etc_dir + "/ister.json", "w") as json_file:
        json_file.write(json_text)
def append_installer_rootwait(path):
    """Append 'rootwait' to the kernel command line of the sole boot entry.

    Expects exactly one file in <path>/boot/loader/entries/ whose last
    line is the 'options ...' kernel command line; raises otherwise.
    """
    entries_dir = path + "/boot/loader/entries/"
    candidates = os.listdir(entries_dir)
    if len(candidates) != 1:
        raise Exception("Unable to find specific entry file in {0}, "
                        "found {1} instead".format(entries_dir, candidates))
    entry_file = entries_dir + candidates[0]
    with open(entry_file, "r") as conf:
        lines = conf.readlines()
    last_line = lines[-1]
    if not last_line.startswith("options "):
        raise Exception("Last line of entry file is not the kernel "
                        "commandline options")
    # Drop the trailing newline, append the flag, restore the newline.
    lines[-1] = last_line[:-1] + " rootwait\n"
    os.unlink(entry_file)
    with open(entry_file, "w") as conf:
        conf.writelines(lines)
def disable_tty1_getty(path):
    """Mask systemd's getty on tty1 inside the image rooted at *path*.

    Linking the unit to /dev/null keeps the installer output on tty1
    from being clobbered by a login prompt.
    """
    wants_dir = path + "/etc/systemd/system/getty.target.wants"
    os.makedirs(wants_dir)
    os.symlink("/dev/null", wants_dir + "/getty@tty1.service")
def add_installer_service(path):
    """Enable ister.service in the image's multi-user target.

    NOTE(review): the symlink *target* is prefixed with the build path, so
    inside the installed image it points at a host-side location. The
    sibling disable_tty1_getty uses an image-absolute target ("/dev/null");
    confirm whether "/usr/lib/systemd/system/ister.service" (no prefix)
    was intended here.
    """
    os.symlink("{}/usr/lib/systemd/system/ister.service"
               .format(path),
               "{}/usr/lib/systemd/system/multi-user.target.wants/ister.service"
               .format(path))
# Usage: <script> IMAGE_ROOT
# Applies all post-update tweaks to the VM installation image; exits
# non-zero on bad usage or any failure.
if __name__ == '__main__':
    if len(sys.argv) != 2:
        sys.exit(-1)
    try:
        create_installer_config(sys.argv[1])
        append_installer_rootwait(sys.argv[1])
        disable_tty1_getty(sys.argv[1])
        add_installer_service(sys.argv[1])
    except Exception as exep:
        print(exep)
        sys.exit(-1)
    sys.exit(0)
|
duahimanshu100/credit_computing_machine | credit_computing_machine/app/models.py | Python | mit | 3,284 | 0.008222 | from django.db import models
from credit_computing_machine.models import TimestampModel
from privateurl.models import PrivateUrl
from django.db.models.signals import pre_save
from django.dispatch import receiver
# Create your models here.
class CreditManager(models.Manager):
    """Query helpers shared by the credit models.

    Attached as the default manager on CreditGroup, CreditUser and
    CreditScore, so the same helpers are reachable from any of them.
    """
    def get_credit_all_user(self, credit_group_id):
        """All members of the group, admins included."""
        return CreditUser.objects.filter(credit_group=credit_group_id)
    def get_credit_admin_user(self, credit_group_id):
        """Only the admin members of the group."""
        return CreditUser.objects.filter(credit_group=credit_group_id, is_admin=True)
    def get_credit_non_admin_user(self, credit_group_id):
        """Only the regular (non-admin) members of the group."""
        return CreditUser.objects.filter(credit_group=credit_group_id, is_admin=False)
    def get_dict_scores(self,credit_group_id):
        """Build {rater_email: {ratee_email: score}} for the group.

        Raters are the non-admin members who have not declined
        (is_diclined=False -- field name sic in the schema).
        """
        users = CreditGroup.objects.get_credit_non_admin_user(credit_group_id).filter(is_diclined=False)
        credit_scores = CreditScore.objects.filter(credit_group_id=credit_group_id)
        dict_scores = {}
        for user in users:
            dict_scores[user.email] = {}
            # All scores this user handed out, keyed by recipient email.
            user_scores = credit_scores.filter(from_credit_user = user)
            for user_score in user_scores:
                dict_scores[user.email][user_score.to_credit_user.email] = user_score.score
        return dict_scores
class CreditGroup(TimestampModel):
    """A group of users who score each other's credit."""
    name = models.CharField(max_length=200)
    # Soft-delete flag; rows are never physically removed.
    is_deleted = models.BooleanField(default=False)
    # Tokenized URL for the group's management page; filled in by the
    # pre_save hook below when missing.
    privateurl = models.ForeignKey(PrivateUrl,null=True)
    objects = CreditManager()
    def __str__(self):
        return self.name
@receiver(pre_save, sender=CreditGroup, dispatch_uid="add_group_purl")
def add_group_purl(sender, instance, **kwargs):
    """pre_save hook: lazily attach a 'manage_credit_score' PrivateUrl."""
    if not instance.privateurl:
        purl = PrivateUrl.create('manage_credit_score')
        instance.privateurl= purl
class CreditUser(TimestampModel):
    """A member of a CreditGroup, identified by email within the group."""
    name = models.CharField(max_length=200)
    email = models.EmailField()
    # Aggregate credit score for this member.
    score = models.FloatField(default=0)
    credit_group = models.ForeignKey(CreditGroup, null=True, blank=True, related_name='credit_users')
    is_admin = models.BooleanField(default=False)
    # Tokenized URL for this member's scoring page (set by pre_save hook).
    privateurl = models.ForeignKey(PrivateUrl, null=True)
    # Whether the member has submitted their scores.
    is_submitted = models.BooleanField(default=False)
    # Whether the member declined to take part (field name sic: "diclined").
    is_diclined = models.BooleanField(default=False)
    objects = CreditManager()
    def __str__(self):
        return '%s from %s'%(self.name,self.credit_group)
    class Meta:
        # One admin row and one member row at most per email per group.
        unique_together = ('credit_group', 'email','is_admin')
@receiver(pre_save, sender=CreditUser, dispatch_uid="add_user_purl")
def add_user_purl(sender, instance, **kwargs):
    """Attach a fresh PrivateUrl to a CreditUser that does not have one yet."""
    if not instance.privateurl:
        purl = PrivateUrl.create('user_credit')
        instance.privateurl= purl
|
class CreditScore(TimestampModel):
    """A single score given by one group member to another."""
    score = models.FloatField()
    from_credit_user = models.ForeignKey(
        CreditUser, related_name='from_credit_user')
    to_credit_user = models.ForeignKey(
        CreditUser, related_name='to_credit_user')
    credit_group = models.ForeignKey(CreditGroup)
    objects = CreditManager()
    @property
    def to_credit_user_name(self):
        # Convenience accessor mirroring the related user's name.
        return self.to_credit_user.name
    @property
    def to_credit_user_email(self):
        # Convenience accessor mirroring the related user's e-mail.
        return self.to_credit_user.email
    def __str__(self):
        return '%s to %s from %s'%(self.from_credit_user,self.to_credit_user,self.credit_group)
|
jaeilepp/eggie | mne/commands/utils.py | Python | bsd-2-clause | 1,576 | 0.005076 | #emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
#ex: set sts=4 ts=4 sw=4 noet:
"""Some utility functions for commands (e.g. for cmdline handling)
"""
# Authors: Yaroslav Halchenko <debian@onerussian.com>
#
# License: BSD (3-clause)
import imp, os, re
from optparse import OptionParser
from subprocess import Popen, PIPE
import mne
def get_optparser(cmdpath):
    """Create OptionParser with cmd source specific settings (e.g. prog value)

    Parameters
    ----------
    cmdpath : str
        Path to the command's source file.  Its module docstring supplies
        the parser description (first line) and epilog (remaining lines).

    Returns
    -------
    parser : OptionParser
        Parser whose ``prog`` is "mne <command>".
    """
    command = os.path.basename(cmdpath)
    if re.match('mne_(.*).py', command):
        # Strip the "mne_" prefix and the ".py" suffix.
        command = command[4:-3]

    # Fetch description from the command module's docstring.
    mod = imp.load_source('__temp', cmdpath)
    # Bug fix: description/epilog were previously only bound inside the
    # "if mod.__doc__" branch, so a module without a docstring raised
    # NameError in the OptionParser() call below.
    description, epilog = None, None
    if mod.__doc__:
        doc_lines = mod.__doc__.split('\n')
        description = doc_lines[0]
        if len(doc_lines) > 1:
            epilog = '\n'.join(doc_lines[1:])

    # monkey patch OptionParser to not wrap epilog
    OptionParser.format_epilog = lambda self, formatter: self.epilog
    parser = OptionParser(prog="mne %s" % command,
                          version=mne.__version__,
                          description=description,
                          epilog=epilog)
    return parser
return parser
def get_status_output(cmd):
    """Run *cmd* through the shell, capturing status and both streams.

    Drop-in replacement for ``commands.getstatusoutput`` (deprecated
    since Python 2.6).

    Returns
    -------
    (status, output, error) : tuple
        Process return code, captured stdout and captured stderr.
    """
    proc = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
    out, err = proc.communicate()
    return proc.returncode, out, err
|
jbergantine/django-blog | django_blog/migrations/0001_initial.py | Python | mit | 2,509 | 0.004783 | # Generated by Django 3.0 on 2019-12-03 17:14
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema for the blog app: Author, Category and Entry."""
    initial = True
    dependencies = [
        # Author references the (possibly swapped-out) auth user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Author',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(help_text='Maximum 250 characters.', max_length=250)),
                ('slug', models.SlugField(help_text='Suggested value automatically generated from title. Must be unique.', unique=True)),
            ],
            options={
                'verbose_name_plural': 'Categories',
                'ordering': ['title'],
            },
        ),
        migrations.CreateModel(
            name='Entry',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=250)),
                ('slug', models.SlugField(help_text='Suggested value automatically generated from title. Must be unique.', unique_for_date='pub_date')),
                ('body', models.TextField(help_text='Use Markdown to mark this up. http://daringfireball.net/projects/markdown/syntax')),
                # Rendered HTML cache of ``body``; filled in by app code.
                ('body_html', models.TextField(blank=True, editable=False)),
                ('pub_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('status', models.IntegerField(choices=[(1, 'Live'), (2, 'Draft'), (3, 'Hidden')], default=1)),
                # PROTECT: an author/category with entries cannot be deleted.
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='django_blog.Author')),
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='django_blog.Category')),
            ],
            options={
                'verbose_name_plural': 'Entries',
                'ordering': ['-pub_date'],
            },
        ),
    ]
|
eroicaleo/LearningPython | interview/leet/137_Single_Number_II.py | Python | mit | 1,076 | 0.027881 | #!/usr/bin/env python
class Solution:
    def singleNumber(self, nums):
        """Return the element that appears exactly once in *nums*, where
        every other element appears exactly three times.

        :type nums: List[int]
        :rtype: int

        Every element except the answer occurs three times, so
        3 * sum(set(nums)) counts each distinct value three times while
        sum(nums) counts the answer only once; the difference is exactly
        twice the answer.  This replaces the previous hand-rolled 32-bit
        mod-3 bit counting, which depended on a fixed word width and a
        min-offset trick for negatives; Python ints are unbounded, so the
        identity is exact for any magnitude and sign.  O(n) time,
        O(n) extra space for the set.
        """
        return (3 * sum(set(nums)) - sum(nums)) // 2
# Ad-hoc manual checks used while developing the solution above.
mask1 = int('55555555', base=16)  # selects the even bit positions
mask2 = int('AAAAAAAA', base=16)  # selects the odd bit positions
nums1 = 2
int1 = (nums1 & mask1)
int2 = (nums1 & mask2) >> 1
# Successive test inputs; only the last assignment is actually used.
nums = [0,1,0,1,0,1,99]
nums = [2,2,3,2]
nums = [2,2,5,5,1,5,1,1,0,2]
nums = [-2,-2,1,1,-3,1,-3,-3,-4,-2]
print(int1)
print(int2)
|
qedsoftware/commcare-hq | corehq/apps/fixtures/download.py | Python | bsd-3-clause | 13,120 | 0.002058 | from cStringIO import StringIO
from couchdbkit import ResourceNotFound
from datetime import datetime, timedelta
from django.template.defaultfilters import yesno
from django.utils.translation import ugettext as _
from corehq.apps.fixtures.exceptions import FixtureDownloadError
from corehq.apps.fixtures.models import FixtureDataType, FixtureDataItem, _id_from_doc
from corehq.apps.fixtures.upload import DELETE_HEADER
from couchexport.export import export_raw
from couchexport.models import Format
from soil import DownloadBase
from soil.util import expose_cached_download
def prepare_fixture_download(table_ids, domain, task, download_id):
    """Prepare fixture data for Excel download.

    Builds one workbook with a "types" sheet plus one sheet per data type,
    serializes it as XLSX and exposes it as a cached soil download under
    *download_id*.  *task* is forwarded so progress can be reported.
    """
    data_types_book, excel_sheets = _prepare_fixture(table_ids, domain, task=task)
    # The "types" sheet always comes first, followed by one sheet per tag.
    header_groups = [("types", excel_sheets["types"]["headers"])]
    value_groups = [("types", excel_sheets["types"]["rows"])]
    for data_type in data_types_book:
        header_groups.append((data_type.tag, excel_sheets[data_type.tag]["headers"]))
        value_groups.append((data_type.tag, excel_sheets[data_type.tag]["rows"]))
    file = StringIO()
    format = Format.XLS_2007
    export_raw(tuple(header_groups), tuple(value_groups), file, format)
    return expose_cached_download(
        file.getvalue(),
        60 * 60 * 2,  # cache the generated workbook for two hours
        file_extension=".xlsx",
        mimetype=Format.from_format(format).mimetype,
        content_disposition='attachment; filename="%s_fixtures.xlsx"' % domain,
        download_id=download_id,
    )
def prepare_fixture_html(table_ids, domain):
    """Prepare fixture data for HTML view.

    Returns only the sheets dict from _prepare_fixture (second element),
    rendered with html_response=True.
    """
    return _prepare_fixture(table_ids, domain, html_response=True)[1]
def _prepare_fixture(table_ids, domain, html_response=False, task=None):
if table_ids and table_ids[0]:
try:
data_types_view = [FixtureDataType.get(id) for id in table_ids]
except ResourceNotFound:
if html_response:
raise FixtureDownloadError(
_("Sorry, we couldn't find that table. If you think this "
"is a mistake please report an issue."))
data_types_view = FixtureDataType.by_domain(domain)
else:
data_types_view = FixtureDataType.by_domain(domain)
if html_response:
data_types_view = list(data_types_view)[0:1]
total_tables = len(data_types_view)
# when total_tables < 4 the final percentage can be >= 100%, but for
# a small number of tables it renders more accurate progress
total_events = (total_tables + (0 if total_tables < 4 else 1)) * 10
now = datetime.utcnow
last_update = [now()]
upate_period = timedelta(seconds=1) # do not update progress more than once a second
def _update_progress(event_count, item_count, items_in_table):
if task and now() - last_update[0] > upate_period:
last_update[0] = now()
processed = event_count * 10 + (10. * item_count / items_in_table)
processed = min(processed, total_events) # limit at 100%
DownloadBase.set_progress(task, processed, total_events)
# book-keeping data from view_results for repeated use
data_types_book = []
data_items_book_by_type = {}
item_helpers_by_type = {}
"""
Contains all excel sheets in following format
excel_sheets = {
"types": {
"headers": [],
"rows": [(row), (row), (row)]
}
"next-sheet": {
"headers": [],
"rows": [(row), (row), (row)]
},
...
}
"""
excel_sheets = {}
def empty_padding_list(length):
return ["" for x in range(0, length)]
max_fields = 0
max_item_attributes = 0
"""
- Helper to generate headers like "field 2: property 1"
- Captures max_num_of_properties for any field of any type at the list-index.
Example values:
[0, 1] -> "field 2: property 1" (first-field has zero-props, second has 1 property)
[1, 1] -> "field 1: property 1" (first-field has 1 property, second has 1 property)
[0, 2] -> "field 2: property 1", "field 2: property 2"
"""
field_prop_count = []
"""
captures all possible 'field-property' values for each data-type
Example value
{
u'clinics': {'field 2 : property 1': u'lang'},
u'growth_chart': {'field 2 : property 2': u'maxWeight'}
}
"""
type_field_properties = {}
get_field_prop_format = lambda x, y: "field " + str(x) + " : property " + str(y)
for event_count, data_type in enumerate(data_types_view):
# Helpers to generate 'types' sheet
type_field_properties[data_type.tag] = {}
data_types_book.append(data_type)
if len(data_type.fields) > max_fields:
max_fields = len(data_type.fields)
if len(data_type.item_attributes) > max_item_attributes:
max_item_attributes = len(data_type.item_attributes)
for index, field in enumerate(data_type.fields):
if len(field_prop_count) <= index:
field_prop_count.append(len(field.properties))
elif field_prop_count[index] <= len(field.properties):
field_prop_count[index] = len(field.properties)
if len(field.properties) > 0:
for prop_index, property in enumerate(field.properties):
prop_key = get_field_prop_format(index + 1, prop_index + 1)
type_field_properties[data_type.tag][prop_key] = property
# Helpers to generate item-sheets
data_items_book_by_type[data_type.tag] = []
max_users = 0
max_groups = 0
max_locations = 0
max_field_prop_combos = {field_name: 0 for field_name in data_type.fields_without_attributes}
fixture_data = FixtureDataItem.by_data_type(domain, data_type.get_id)
num_rows = len(fixture_data)
for n, item_row in enumerate(fixture_data):
_update_progress(event_count, n, num_rows)
data_items_book_by_type[data_type.tag].append(item_row)
max_groups = max(max_groups, len(item_row.groups))
max_users = max(max_users, len(item_row.users))
max_locations = max(max_locations, len(item_row.locations))
for field_key in item_row.fields:
if field_key in max_field_prop_combos:
max_combos = max_field_prop_combos[field_key]
cur_combo_len = len(item_row.fields[field_key].field_list)
max_combos = cur_combo_len if cur_combo_len > max_combos else max_combos
| max_field_pro | p_combos[field_key] = max_combos
item_helpers = {
"max_users": max_users,
"max_groups": max_groups,
"max_locations": max_locations,
"max_field_prop_combos": max_field_prop_combos,
}
item_helpers_by_type[data_type.tag] = item_helpers
# Prepare 'types' sheet data
types_sheet = {"headers": [], "rows": []}
types_sheet["headers"] = [DELETE_HEADER, "table_id", 'is_global?']
types_sheet["headers"].extend(["field %d" % x for x in range(1, max_fields + 1)])
types_sheet["headers"].extend(["property %d" % x for x in range(1, max_item_attributes + 1)])
field_prop_headers = []
for field_num, prop_num in enumerate(field_prop_count):
if prop_num > 0:
for c in range(0, prop_num):
prop_key = get_field_prop_format(field_num + 1, c + 1)
field_prop_headers.append(prop_key)
types_sheet["headers"].append(prop_key)
for data_type in data_types_book:
common_vals = ["N", data_type.tag, yesno(data_type.is_global)]
field_vals = ([field.field_name for field in data_type.fields]
+ empty_padding_list(max_fields - len(data_type.fields)))
item_att_vals = (data_type.item_attributes + empty_padding_list(
max_item_attributes - len(data_type.item_attributes)
))
prop_vals = []
if data_ty |
F5Networks/f5-openstack-agent | f5_openstack_agent/utils/clean_partition.py | Python | apache-2.0 | 2,380 | 0 | # coding=utf-8
# Copyright (c) 2016-2018, F5 Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import ConfigParser
from f5.bigip import ManagementRoot
from f5_openstack_agent.lbaasv2.drivers.bigip import system_helper
import requests
import sys
requests.packages.urllib3.disable_warnings()
def clean_partition(bigip, partition):
    """Delete every object inside *partition* on *bigip*.

    The folder itself is kept; only its contents are purged.
    """
    sh = system_helper.SystemHelper()
    return sh.purge_folder_contents(bigip, folder=partition)
def parse_args():
    """Build the command-line interface and evaluate sys.argv.

    Both options are mandatory: the agent configuration file and the
    BIG-IP partition to purge.
    """
    cli = argparse.ArgumentParser(
        description='Utility to clear out the contents of a corrupted tenant',
    )
    cli.add_argument(
        '--config-file',
        help="Path to f5-openstack-agent.ini",
        metavar='config_file',
        required=True,
    )
    cli.add_argument(
        '--partition',
        help="Partion on the device to clean",  # sic: typo kept verbatim
        required=True,
    )
    return cli.parse_args()
def parse_config(config_file):
    """Read iControl connection settings from *config_file*.

    Returns a list of connected ManagementRoot handles, one per host named
    in ``icontrol_hostname`` (comma-separated).  Returns an empty list when
    a required option is missing.
    """
    config = ConfigParser.ConfigParser()
    config.readfp(open(config_file))
    bigips = []
    try:
        config_addrs = config.get("DEFAULT", 'icontrol_hostname')
        config_user = config.get("DEFAULT", 'icontrol_username')
        config_pass = config.get("DEFAULT", 'icontrol_password')
    except ConfigParser.NoOptionError as err:
        # Best effort: report the missing option and return no devices.
        print(err.message)
        return bigips
    # One ManagementRoot per comma-separated hostname, shared credentials.
    for config_addr in config_addrs.split(','):
        bigips.append(
            ManagementRoot(hostname=config_addr,
                           username=config_user,
                           password=config_pass)
        )
    return bigips
def main(args):
    """Purge the requested partition on every BIG-IP named in the config."""
    # Parse the config file
    bigips = parse_config(args.config_file)
    for bigip in bigips:
        try:
            clean_partition(bigip, args.partition)
        except Exception as err:
            # Best effort: report the failure and continue with the
            # remaining devices.
            print(err.message)
if __name__ == "__main__":
sys.exit(main(parse_args()))
|
rgeyer/terminator_for_rs_ss | skynet/urls.py | Python | mit | 1,885 | 0.001061 | """skynet URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1 | . Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to url | patterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Import the include() function: from django.conf.urls import url, include
3. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from www.controllers.web import main
from www.controllers.web.rightscale.account.new import new
from www.controllers.web.rightscale.account.account import account
from www.controllers.api.session import session, session_viewset
from www.controllers.api.resource import resource_viewset
from www.controllers.api.module import module
from rest_framework import routers
# REST endpoints exposed under /api2/ through DRF's default router.
api_router = routers.DefaultRouter()
api_router.register(r'session', session_viewset)
api_router.register(r'resource', resource_viewset)
urlpatterns = [
    url(r'^api2/', include(api_router.urls)),
    # python-social-auth and Django auth views (login/logout/password).
    url('', include('social.apps.django_app.urls', namespace='social')),
    url('', include('django.contrib.auth.urls', namespace='auth')),
    # Legacy (non-router) session/module API endpoints.
    url(r'^api/sessions/(?P<sessionid>[0-9a-zA-Z\-]*)/modules/(?P<module>[a-z]*)$', module.as_view()),
    url(r'^api/sessions/(?P<sessionid>[0-9a-zA-Z\-]*)/(?P<action>[a-z]*)$', session.as_view()),
    url(r'^admin/', admin.site.urls),
    url(r'^rightscale/accounts/new', new.as_view()),
    url(r'^rightscale/accounts/(?P<id>[0-9]*)', account.as_view()),
    # Catch-all: dashboard landing page.
    url(r'^', main.index, name='dashboard'),
]
|
hyounggyu/datamanager | datamanager/create.py | Python | gpl-2.0 | 6,741 | 0.00356 | import os
import getopt
import re
import sys
import time
from pathlib import Path
from PyQt4 import QtCore, QtGui
from xni.manage import dataset
class Worker(QtCore.QObject):
    """Background worker that drives dataset.create() with progress signals.

    Intended to be moved onto a QThread; emits ``relay`` with the index of
    each processed image and ``finished`` when done or cancelled.
    """
    finished = QtCore.pyqtSignal()
    relay = QtCore.pyqtSignal(int)
    # Cancellation flag polled by process(); set from the GUI thread via
    # stop().
    isFinished = False
    def __init__(self, output, images, bgnds=[], darks=[], parent=None):
        # NOTE(review): mutable default arguments are shared across calls;
        # harmless here since they are only stored/read, but consider None
        # defaults.
        super(Worker, self).__init__(parent)
        self.output = output
        self.images = images
        self.bgnds = bgnds
        self.darks = darks
    def process(self):
        """Run the conversion, emitting progress after every image."""
        # dataset.create yields (index, _) per processed image, which allows
        # incremental progress reporting and cooperative cancellation.
        map_obj = dataset.create(self.output, self.images, self.bgnds, self.darks)
        for i, _ in map_obj:
            if self.isFinished == True:
                break
            self.relay.emit(i)
            QtGui.QApplication.processEvents()
        self.finished.emit()
    def stop(self):
        # Request cooperative cancellation; process() checks the flag
        # before each image.
        self.isFinished = True
class CreateWindow(QtGui.QMainWindow):
    """Main window for building an HDF5 dataset from TIFF images.

    The user picks a source directory, file-name prefixes for image /
    background / dark frames and a target file; ``run`` then performs the
    conversion on a worker thread behind a progress dialog.
    """
    # Selected source directory and target file path (None until chosen).
    _dir = None
    output = None
    def __init__(self, parent=None):
        super(CreateWindow, self).__init__(parent)
        self.initUI()
    def initUI(self):
        """Build the form: labels, prefix line-edits, select/run buttons."""
        self.srcdirLabel = QtGui.QLabel('Source directory')
        self.srcdirLabel.setFixedWidth(200)
        self.srcdirBtn = QtGui.QPushButton('Select')
        self.srcdirBtn.clicked.connect(self.selectSourceDirectory)
        # Each prefix edit live-updates the matching-file count in the
        # status bar as the user types.
        self.prefixLabel = QtGui.QLabel('Image file prefix')
        self.prefixEdit = QtGui.QLineEdit()
        self.prefixEdit.textChanged[str].connect(self.countImages)
        self.prefixEdit.textEdited[str].connect(self.countImages)
        self.bgndprefixLabel = QtGui.QLabel('Background image file prefix')
        self.bgndprefixEdit = QtGui.QLineEdit()
        self.bgndprefixEdit.textChanged[str].connect(self.countImages)
        self.bgndprefixEdit.textEdited[str].connect(self.countImages)
        self.darkprefixLabel = QtGui.QLabel('Dark image file prefix')
        self.darkprefixEdit = QtGui.QLineEdit()
        self.darkprefixEdit.textChanged[str].connect(self.countImages)
        self.darkprefixEdit.textEdited[str].connect(self.countImages)
        self.tgtfileLabel = QtGui.QLabel('Target file name')
        self.tgtfileBtn = QtGui.QPushButton('Select')
        self.tgtfileBtn.clicked.connect(self.selectTargetFilename)
        grid1 = QtGui.QGridLayout()
        grid1.setSpacing(10)
        grid1.addWidget(self.srcdirLabel, 0, 0)
        grid1.addWidget(self.srcdirBtn, 0, 1)
        grid1.addWidget(self.prefixLabel, 1, 0)
        grid1.addWidget(self.prefixEdit, 1, 1)
        grid1.addWidget(self.bgndprefixLabel, 2, 0)
        grid1.addWidget(self.bgndprefixEdit, 2, 1)
        grid1.addWidget(self.darkprefixLabel, 3, 0)
        grid1.addWidget(self.darkprefixEdit, 3, 1)
        grid1.addWidget(self.tgtfileLabel, 4, 0)
        grid1.addWidget(self.tgtfileBtn, 4, 1)
        group1 = QtGui.QGroupBox('Source Configuration')
        group1.setLayout(grid1)
        self.runBtn = QtGui.QPushButton('Create dataset')
        self.runBtn.clicked.connect(self.run)
        hbox1 = QtGui.QHBoxLayout()
        hbox1.addWidget(self.runBtn)
        centralWidget = QtGui.QWidget(self)
        vbox = QtGui.QVBoxLayout(centralWidget)
        vbox.addStretch(1)
        vbox.addWidget(group1)
        vbox.addLayout(hbox1)
        self.setCentralWidget(centralWidget)
        self.setWindowTitle('Create Dataset')
        self.statusBar().showMessage('Ready')
        self.show()
    def selectSourceDirectory(self):
        """Ask for the source directory and remember it in self._dir."""
        _dir = QtGui.QFileDialog.getExistingDirectory(self, caption="Select source directory")
        if _dir != '':
            self._dir = _dir
            self.statusBar().showMessage('{} directory selected.'.format(os.path.basename(self._dir)))
    def selectTargetFilename(self):
        """Ask for the output file and remember it in self.output."""
        fn = QtGui.QFileDialog.getSaveFileName(self, caption="Select target file")
        if fn != '':
            self.output = fn
            self.statusBar().showMessage('{} file selected.'.format(os.path.basename(self.output)))
    def _list(self, prefix):
        """Return sorted paths of TIFF files in self._dir starting with *prefix*."""
        if self._dir == None or self._dir == '':
            return []
        # Case-insensitive match of "<prefix>...tif" / "<prefix>...tiff".
        pattern = '^{}.*(tif|tiff)$'.format(prefix)
        match = re.compile(pattern, re.I).match
        result = []
        for fn in os.listdir(self._dir):
            fn = os.path.normcase(fn)
            if match(fn) is not None:
                result.append(os.path.join(self._dir, fn))
        return sorted(result)
    def countImages(self, prefix):
        """Show in the status bar how many files match *prefix*."""
        fns = self._list(prefix)
        self.statusBar().showMessage('{} image files selected.'.format(len(fns)))
    def run(self):
        """Validate the form, confirm, and run the conversion on a thread."""
        image_prefix = self.prefixEdit.text()
        images = self._list(image_prefix)
        if image_prefix == '':
            self.warning('Image prefix is empty.')
        if len(images) == 0:
            self.warning('Can not find images.')
            return
        # Background/dark frames are optional; empty prefix means none.
        bgnd_prefix = self.bgndprefixEdit.text()
        dark_prefix = self.darkprefixEdit.text()
        bgnds = self._list(bgnd_prefix) if bgnd_prefix != '' else []
        darks = self._list(dark_prefix) if dark_prefix != '' else []
        if self.output == None:
            self.warning('Target file is None.')
            return
        ret = self.confirm(self.output, images, bgnds, darks)
        if ret == QtGui.QMessageBox.Ok:
            # Run the conversion off the GUI thread; the progress dialog's
            # Cancel button requests cooperative termination via stop().
            self.thread = QtCore.QThread()
            self.worker = Worker(self.output, images, bgnds, darks)
            self.progress = QtGui.QProgressDialog("Progress","Cancel",0,len(images)-1)
            self.thread.started.connect(self.worker.process)
            self.worker.moveToThread(self.thread)
            self.worker.relay.connect(self.progress.setValue)
            self.worker.finished.connect(self.thread.quit)
            self.progress.canceled.connect(self.worker.stop)
            self.thread.start()
            self.progress.exec_()
            if self.progress.wasCanceled():
                pass
    def confirm(self, output, images, bgnds, darks):
        """Show a summary dialog; returns the clicked standard button."""
        msg = '''Number of images: {}
Number of Background images: {}
Number of Dark images: {}
HDF5 filename: {}'''.format(len(images), len(bgnds), len(darks), os.path.basename(output))
        msgbox = QtGui.QMessageBox(self)
        msgbox.setText('Really?')
        msgbox.setInformativeText(msg)
        msgbox.setStandardButtons(QtGui.QMessageBox.Ok | QtGui.QMessageBox.Cancel)
        msgbox.setDefaultButton(QtGui.QMessageBox.Cancel);
        return msgbox.exec_()
    def warning(self, msg):
        """Show *msg* in a modal message box."""
        msgbox = QtGui.QMessageBox(self)
        msgbox.setText(msg)
        msgbox.exec_()
def start_create(args):
    """Entry point: run the Qt event loop with the dataset-creation window."""
    app = QtGui.QApplication(sys.argv)
    win = CreateWindow()
    win.show()
    win.activateWindow()
    win.raise_()
    # exec_() blocks until the window closes; its status becomes the
    # process exit code.
    sys.exit(app.exec_())
|
peterdemin/mutant | src/mutant/app.py | Python | isc | 2,308 | 0.000433 | import logging
import sys
from mutant.parsers.python_parser import PythonParser
logger = logging.getLogger(__name__)
class MutantApp(object):
    """Registry and driver for the mutation pipeline.

    Plugins register readers, parser middlewares and generators on an
    instance.  ``parse`` loads a schema through the registered reader and
    middleware chain; ``mutate`` renders the parsed schema with a named
    generator.
    """

    def __init__(self):
        self.readers = {}
        self.parser_middlewares = []
        self.generators = {}
        self.generator_extensions = {}
        self.parser = PythonParser()

    def register_reader(self, name, reader):
        """Expose *reader* under *name* for use by parse()."""
        self.readers[name] = reader

    def register_parser_middleware(self, middleware):
        """Append a middleware hooked around schema parsing."""
        self.parser_middlewares.append(middleware)

    def register_generator(self, generator_name, generator_class):
        """Register an output generator class under *generator_name*."""
        self.generators[generator_name] = generator_class

    def extend_generator(self, generator_name, extension):
        """Attach *extension* to the generator named *generator_name*."""
        self.generator_extensions.setdefault(generator_name, []).append(extension)

    def parse(self, reader_name, file_or_name):
        """Read, pre-process, parse and post-process the input schema.

        Steps: read the input through the named reader, run every
        middleware's ``before_parse`` hook in registration order, parse to
        the internal format, then run the ``after_parse`` hooks in reverse
        registration order.  The result is cached on ``self.schema``.
        """
        raw = self._read(reader_name, file_or_name)
        for mw in self.parser_middlewares:
            if hasattr(mw, 'before_parse'):
                raw = mw.before_parse(raw)
        parsed = self.parser.parse(raw)
        for mw in reversed(self.parser_middlewares):
            if hasattr(mw, 'after_parse'):
                parsed = mw.after_parse(parsed)
        self.schema = parsed
        return self.schema

    def mutate(self, generator_name):
        """Render the previously parsed schema with the named generator."""
        generator = self.generators[generator_name](self.schema)
        for extension in self.generator_extensions.get(generator_name, []):
            generator.register_extension(extension)
        return generator.render()

    def _read(self, reader_name, file_or_name):
        """Dispatch to the named reader for a file object, '-', or a path."""
        reader = self.readers[reader_name]
        if hasattr(file_or_name, 'read'):
            return reader.read(file_or_name)
        if file_or_name == '-':
            return reader.read(sys.stdin)
        with open(file_or_name) as fp:
            return reader.read(fp)
|
looran/tailosd | __init__.py | Python | isc | 23 | 0 | from .ta | ilosd | import *
|
iclosure/coolkits | src/jplot3d-py/com/smartsoft/j3d/GLExtensionFunctions.py | Python | gpl-2.0 | 438 | 0.015982 | '''
Created on Dec 28, 2015
@author: iclosure
'''
class GLExtensionFunctions(object):
    """Placeholder port of the Java GLExtensionFunctions helper.

    Capability probes are not implemented yet; every query returns None.
    """
    def __init__(self, params=None):
        # Bug fix: ``params`` is now optional.  The module-level singleton
        # below is constructed with no arguments, which previously raised
        # TypeError at import time because ``params`` was required.
        self.params = params
    def resolve(self, context):
        """Resolve GL extension entry points for *context* (not implemented)."""
        pass
    def fboSupported(self):
        """Whether framebuffer objects are available (not implemented)."""
        pass
    def openGL15Supported(self):
        """Whether OpenGL 1.5 is available (not implemented)."""
        pass
# Module-level singleton mirroring the Java static accessor.
# NOTE(review): GLExtensionFunctions.__init__ declares a required ``params``
# argument, so this zero-argument call raises TypeError at import unless the
# parameter is given a default — verify.
_instance = GLExtensionFunctions()
def getGLExtensionFunctions():
    """Return the shared GLExtensionFunctions singleton."""
    return _instance
|
iohannez/gnuradio | gnuradio-runtime/python/pmt/qa_pmt_to_python.py | Python | gpl-3.0 | 1,610 | 0.001863 | #!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Stree | t,
# Boston, MA 02110-1301, USA.
#
from __future__ import absolute_import
import unittest
import pmt
from pmt import pmt_to_python as pmt2py
class test_pmt_to_python(unittest.TestCase):
    """Round-trip checks for the pmt <-> Python conversion helpers."""
    def test_pmt_from_double(self):
        # Scalar round trip: Python number -> PMT -> Python.
        b = pmt.from_double(123765)
        self.assertEqual(pmt.to_python(b), 123765)
        # NOTE(review): ``t`` is never asserted against; presumably a
        # leftover from an earlier list round-trip check — verify.
        t = pmt.to_pmt(list(range(5)))
    def test_numpy_to_uvector_and_reverse(self):
        # Complex vector round trip through a PMT uniform vector must
        # preserve both dtype and every element.
        import numpy as np
        N = 100
        narr = np.ndarray(N, dtype=np.complex128)
        narr.real[:] = np.random.uniform(size=N)
        narr.imag[:] = np.random.uniform(size=N)
        uvector = pmt2py.numpy_to_uvector(narr)
        nparr = pmt2py.uvector_to_numpy(uvector)
        self.assertTrue(nparr.dtype==narr.dtype)
        self.assertTrue(np.alltrue(nparr == narr))
if __name__ == '__main__':
unittest.main()
|
bwrsandman/openerp-hr | hr_employee_seniority/hr.py | Python | agpl-3.0 | 5,976 | 0.002677 | #-*- coding:utf-8 -*-
#
#
# Copyright (C) 2013 Michael Telahun Makonnen <mmakonnen@gmail.com>.
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from datetime import datetime, date, timedelta
from dateutil.relativedelta import relativedelta
from openerp.osv import fields, osv
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT as OE_DATEFORMAT
class hr_employee(osv.Model):
    """Extends hr.employee with a computed length-of-service (in months)."""
    _inherit = 'hr.employee'
    def _get_contracts_list(self, employee):
        '''Return list of contracts in chronological order'''
        # Insertion sort on date_start: walk the sorted list from the end
        # and splice the new contract in after the first earlier start date.
        contracts = []
        for c in employee.contract_ids:
            l = len(contracts)
            if l == 0:
                contracts.append(c)
            else:
                dCStart = datetime.strptime(c.date_start, OE_DATEFORMAT).date()
                i = l - 1
                while i >= 0:
                    dContractStart = datetime.strptime(
                        contracts[i].date_start, OE_DATEFORMAT).date()
                    if dContractStart < dCStart:
                        contracts = contracts[:i + 1] + [c] + contracts[i + 1:]
                        break
                    elif i == 0:
                        contracts = [c] + contracts
                    i -= 1
        return contracts
    def _get_days_in_month(self, d):
        # Number of days in d's month: first day of the month, plus one
        # month, minus one day = last day of the month.
        last_date = d - timedelta(days=(d.day - 1)) + relativedelta(
            months= +1) + relativedelta(days= -1)
        return last_date.day
    def get_months_service_to_date(self, cr, uid, ids, dToday=None, context=None):
        '''Returns a dictionary of floats. The key is the employee id, and the value is
        number of months of employment.'''
        # NOTE(review): values are actually (months, initial_date) tuples,
        # not bare floats — the docstring understates the return shape.
        res = dict.fromkeys(ids, 0)
        if dToday == None:
            dToday = date.today()
        for ee in self.pool.get('hr.employee').browse(cr, uid, ids, context=context):
            # Zero-length delta to accumulate per-contract durations into.
            delta = relativedelta(dToday, dToday)
            contracts = self._get_contracts_list(ee)
            if len(contracts) == 0:
                res[ee.id] = (0.0, False)
                continue
            dInitial = datetime.strptime(
                contracts[0].date_start, OE_DATEFORMAT).date()
            if ee.initial_employment_date:
                # Pre-system employment: count from the manually entered
                # initial date up to the first recorded contract as well.
                dFirstContract = dInitial
                dInitial = datetime.strptime(
                    ee.initial_employment_date, '%Y-%m-%d').date()
                if dFirstContract < dInitial:
                    # NOTE(review): ``_`` (gettext) is not imported in this
                    # module, and _() is called with two arguments below —
                    # this raise would itself fail; verify and fix upstream.
                    raise osv.except_osv(_('Employment Date mismatch!'),
                                         _('The initial employment date cannot be after the first contract in the system.\nEmployee: %s', ee.name))
                delta = relativedelta(dFirstContract, dInitial)
            for c in contracts:
                dStart = datetime.strptime(c.date_start, '%Y-%m-%d').date()
                if dStart >= dToday:
                    continue
                # If the contract doesn't have an end date, use today's date
                # If the contract has finished consider the entire duration of
                # the contract, otherwise consider only the months in the
                # contract until today.
                #
                if c.date_end:
                    dEnd = datetime.strptime(c.date_end, '%Y-%m-%d').date()
                else:
                    dEnd = dToday
                if dEnd > dToday:
                    dEnd = dToday
                delta += relativedelta(dEnd, dStart)
            # Set the number of months the employee has worked
            date_part = float(delta.days) / float(
                self._get_days_in_month(dInitial))
            res[ee.id] = (
                float((delta.years * 12) + delta.months) + date_part, dInitial)
        return res
    def _get_employed_months(self, cr, uid, ids, field_name, arg, context=None):
        # Function-field getter: expose only the months component of the
        # (months, initial_date) tuples computed above.
        res = dict.fromkeys(ids, 0.0)
        _res = self.get_months_service_to_date(cr, uid, ids, context=context)
        for k, v in _res.iteritems():
            res[k] = v[0]
        return res
    def _search_amount(self, cr, uid, obj, name, args, context):
        # Search support for the function field: translate domain conditions
        # into raw SQL.  Only operators NOT in the skip-list reach execute().
        # NOTE(review): the operator (cond[1]) is interpolated directly into
        # the SQL string; it originates from ORM domains rather than end
        # users, but verify it cannot be attacker-controlled.
        ids = set()
        for cond in args:
            amount = cond[2]
            if isinstance(cond[2], (list, tuple)):
                if cond[1] in ['in', 'not in']:
                    amount = tuple(cond[2])
                else:
                    continue
            else:
                if cond[1] in ['=like', 'like', 'not like', 'ilike', 'not ilike', 'in', 'not in', 'child_of']:
                    continue
            cr.execute("select id from hr_employee having %s %%s" %
                       (cond[1]), (amount,))
            res_ids = set(id[0] for id in cr.fetchall())
            # Intersect with previous conditions (AND semantics).
            ids = ids and (ids & res_ids) or res_ids
        if ids:
            return [('id', 'in', tuple(ids))]
        return [('id', '=', '0')]
    _columns = {
        'initial_employment_date': fields.date('Initial Date of Employment', groups=False,
                                               help='Date of first employment if it was before the start of the first contract in the system.'),
        'length_of_service': fields.function(_get_employed_months, type='float', method=True,
                                             groups=False,
                                             string='Lenght of Service'),
    }
|
justanr/JSONConfigParser | jsonconfigparser/configparser.py | Python | mit | 1,847 | 0.001624 | '''
Build a JSON Configuration Parser that can view, write and edit
a JSON conf file.
copyright 2014 Alec Nikolas Reiter
license MIT
'''
import json
from collections import UserDict
from pprint import pprint
from jsonpath_rw import parse
from .utils import root
class JSONConfigParser(UserDict):
    '''Essentially a wrapper around json.load and json.dump.

    ``storage`` is the path written by write(); ``source`` is an optional
    path read immediately on construction.  Custom encoder/decoder classes
    may be supplied, defaulting to the stdlib JSON ones.
    '''
    def __init__(self, storage, source=None, encoder=None, decoder=None):
        self.storage = storage
        self.encoder = encoder or json.JSONEncoder
        self.decoder = decoder or json.JSONDecoder
        self.data = {}
        if source:
            self.read(source)
    def read(self, fp):
        '''Reads a file containing JSON and loads it into the
        JSONConfigParser.data dict. If an empty file is read,
        we simply ignore the exception.

        ``fp`` is a file path, not an open file object.
        '''
        try:
            # `a` opens file for appending
            # `+` creates the file if it does not exist.
            with open(fp, 'a+') as fh:
                # rewind to beginning of file
                fh.seek(0)
                self.data.update(json.load(fh, cls=self.decoder))
        except ValueError:
            # if the JSON file is empty
            # json.load will throw a ValueError
            # stating as much, for now, we'll ignore it
            pass
    def view(self, path=root):
        '''Pretty prints an endpoint in the JSON.

        ``path`` is a jsonpath expression; defaults to the document root.
        '''
        expr = parse(path)
        matches = expr.find(self.data)
        print('\n')
        for m in matches:
            print("{}:".format(m.full_path))
            pprint(m.value, indent=4)
    def write(self):
        '''Persists the current instance information to disk.'''
        with open(self.storage, 'w+') as fh:
            json.dump(self.data, fh, indent=4, sort_keys=True, cls=self.encoder)
|
caseywstark/dimensionful | example/gravity.py | Python | bsd-2-clause | 702 | 0.001425 | """
Compute the force of gravity between the Earth and Sun.
Copyright 2012, Casey W. Stark. See LICENSE.txt for more information.
"""
# Import the gravitational constant and the Quantity class
from dimensionful import G, Quantity

# Supply the mass of Earth, mass of Sun, and the distance between.
mass_earth = Quantity(5.9742e27, "g")
mass_sun = Quantity(1.0, "Msun")
distance = Quantity(1.0, "AU")

# Calculate it (Newtonian gravity: F = G * m1 * m2 / r^2)
force_gravity = G * mass_earth * mass_sun / distance**2
force_gravity.convert_to_cgs()

# Report (single-argument print() works identically on Python 2 and 3)
print("")
print("The force of gravity between the Earth and Sun is %s" % force_gravity)
print("")

# prints "The force of gravity between the Earth and Sun is 3.54296304519e+27 cm*g/s**2"
|
BadSingleton/pyside2 | tests/QtCore/bug_408.py | Python | lgpl-2.1 | 697 | 0.004304 | import unittest
from PySide2.QtCore import *
class MyDevice(QIODevice):
    """In-memory QIODevice serving a fixed text, for driving QTextStream."""

    def __init__(self, txt):
        QIODevice.__init__(self)
        self.txt = txt
        self.ptr = 0  # read position within self.txt

    def readData(self, size):
        # Clamp the request to what is left in the buffer.
        size = min(len(self.txt) - self.ptr, size)
        # BUGFIX: the slice end must be ptr + size (the original used the
        # absolute index `size`, returning wrong data once ptr > 0).
        retval = self.txt[self.ptr:self.ptr + size]
        self.ptr += size
        return retval
class QIODeviceTest(unittest.TestCase):
    """Read line-separated text back through QTextStream over MyDevice."""

    def testIt(self):
        # Two lines go in; readLine() must hand them back one at a time.
        source = MyDevice("hello world\nhello again")
        source.open(QIODevice.ReadOnly)
        stream = QTextStream(source)
        self.assertEqual(stream.readLine(), "hello world")
        self.assertEqual(stream.readLine(), "hello again")
if __name__ == '__main__':
unittest.main()
|
tzpBingo/github-trending | codespace/python/telegram/replymarkup.py | Python | mit | 1,200 | 0.000833 | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2022
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""Base class for Telegram ReplyMarkup Objects."""
from telegram import TelegramObject
class ReplyMarkup(TelegramObject):
    """Base class for Telegram ReplyMarkup Objects.

    See :class:`telegram.InlineKeyboardMarkup`, :class:`telegram.ReplyKeyboardMarkup`,
    :class:`telegram.ReplyKeyboardRemove` and :class:`telegram.ForceReply` for
    detailed use.
    """

    # No instance attributes of its own; empty __slots__ keeps subclasses
    # free to declare their own slots without gaining a __dict__ here.
    __slots__ = ()
|
vojtechtrefny/blivet | tests/formats_test/selinux_test.py | Python | lgpl-2.1 | 2,792 | 0.001791 | #!/usr/bin/python
import os
import selinux
import tempfile
import unittest
import blivet
from tests import loopbackedtestcase
import blivet.formats.fs as fs
from blivet.size import Size
@unittest.skipUnless(selinux.is_selinux_enabled() == 1, "SELinux is disabled")
class SELinuxContextTestCase(loopbackedtestcase.LoopBackedTestCase):
    """Testing SELinux contexts.
    """

    def __init__(self, methodName='runTest'):
        super(SELinuxContextTestCase, self).__init__(
            methodName=methodName, deviceSpec=[Size("100 MiB")])

    def testMountingExt2FS(self):
        """ Test that lost+found directory gets assigned correct SELinux
            context if installer_mode is True, and retains some random old
            context if installer_mode is False.
        """
        LOST_AND_FOUND_CONTEXT = 'system_u:object_r:lost_found_t:s0'
        an_fs = fs.Ext2FS(device=self.loopDevices[0], label="test")
        self.assertIsNone(an_fs.create())

        # Outside installer mode, the lost+found directory keeps whatever
        # context the mount gives it (not the canonical lost_found_t one).
        blivet.flags.installer_mode = False
        mountpoint = tempfile.mkdtemp("test.selinux")
        an_fs.mount(mountpoint=mountpoint)
        lost_and_found = os.path.join(mountpoint, "lost+found")
        self.assertTrue(os.path.exists(lost_and_found))
        lost_and_found_selinux_context = selinux.getfilecon(lost_and_found)
        an_fs.unmount()
        os.rmdir(mountpoint)
        self.assertNotEqual(lost_and_found_selinux_context[1],
                            LOST_AND_FOUND_CONTEXT)

        # In installer mode, the context must be set to lost_found_t.
        blivet.flags.installer_mode = True
        mountpoint = tempfile.mkdtemp("test.selinux")
        an_fs.mount(mountpoint=mountpoint)
        lost_and_found = os.path.join(mountpoint, "lost+found")
        self.assertTrue(os.path.exists(lost_and_found))
        lost_and_found_selinux_context = selinux.getfilecon(lost_and_found)
        an_fs.unmount()
        os.rmdir(mountpoint)
        self.assertEqual(lost_and_found_selinux_context[1],
                         LOST_AND_FOUND_CONTEXT)

    def testMountingXFS(self):
        """ XFS does not have a lost+found directory. """
        an_fs = fs.XFS(device=self.loopDevices[0], label="test")
        self.assertIsNone(an_fs.create())

        blivet.flags.installer_mode = False
        mountpoint = tempfile.mkdtemp("test.selinux")
        an_fs.mount(mountpoint=mountpoint)
        lost_and_found = os.path.join(mountpoint, "lost+found")
        self.assertFalse(os.path.exists(lost_and_found))
        an_fs.unmount()
        os.rmdir(mountpoint)

        blivet.flags.installer_mode = True
        mountpoint = tempfile.mkdtemp("test.selinux")
        an_fs.mount(mountpoint=mountpoint)
        lost_and_found = os.path.join(mountpoint, "lost+found")
        self.assertFalse(os.path.exists(lost_and_found))
        an_fs.unmount()
        os.rmdir(mountpoint)
if __name__ == "__main__":
unittest.main()
|
Kjili/analysis-preservation.cern.ch | cap/modules/experiments/scripts/lhcb/lhcb.py | Python | gpl-2.0 | 3,286 | 0.006391 | # -*- coding: utf-8 -*-
#
# This file is part of CERN Analysis Preservation Framework.
# Copyright (C) 2016 CERN.
#
# CERN Analysis Preservation Framework is free software; you can redistribute
# it and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# CERN Analysis Preservation Framework is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CERN Analysis Preservation Framework; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
import json
import os
import shelve
filenames = [
"dbases/charm.shelve",
"dbases/b2cc.shelve",
"dbases/bandq.shelve",
# "dbases/citations_cache.shelve",
"dbases/qee.shelve",
"dbases/sl.shelve",
"dbases/b2oc.shelve",
"dbases/bnoc.shelve",
"dbases/hlt.shelve",
"dbases/rd.shelve"
]
def dump_analyses_to_json():
    """Dump the LHCb analysis shelve databases to JSON files.

    Writes one ``lhcb_ana_titles_<wg>.json`` autocomplete file per working
    group, a combined ``lhcb_ana_titles.json``, and ``analyses.json`` holding
    the resolved analysis records from all shelves.
    """
    base = {}
    tmp_title_list = []
    home_dir = os.path.dirname(os.path.abspath(__file__))
    os.chdir(home_dir)

    # Create dir for jsonschemas if it doesn't exist
    output_dir = '../../static/jsonschemas/fields'
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    def resolveObj(s, f, k):
        # Expand one level of references in s[f][k]: every iterable property
        # is replaced by the entries it names under the top-level shelve keys.
        newk = {}
        for p in s[f][k]:
            if (hasattr(s[f][k][p], '__iter__')):
                newp = {}
                for l in s[f][k][p]:
                    try:
                        newp[l] = s[p][l]
                    except:
                        # NOTE(review): this fallback is immediately overwritten
                        # by `newk[p] = newp` below; preserved as-is to keep
                        # the historical output unchanged -- verify intent.
                        newk[p] = s[f][k][p]
                        break
                newk[p] = newp
            else:
                newk[p] = s[f][k][p]
        return newk

    for filename in filenames:
        base_field = "analysis"
        s = shelve.open(filename)

        # Get list of "title" for Anal.Name Autocomplete
        title_list = s.get('analysis').keys()
        for n in title_list:
            tmp_title_list.append({"value": n})
        # NOTE(review): tmp_title_list accumulates across working groups, so
        # each per-group file also contains earlier groups' titles -- confirm.
        working_group = filename.split("/")[1]
        working_group = working_group.replace(".shelve", "")
        with open('../../static/jsonschemas/fields/lhcb_ana_titles_' + working_group + '.json', 'w') as fp:
            json.dump(tmp_title_list, fp)

        for k in s.get(base_field):
            if not k in base:
                base[k] = resolveObj(s, base_field, k)
        s.close()

    with open('../../static/jsonschemas/fields/lhcb_ana_titles.json', 'w') as fp:
        json.dump(tmp_title_list, fp)
    with open('../../scripts/analyses.json', 'w') as fp:
        json.dump(base, fp, ensure_ascii=False)
if __name__=='__main__':
dump_analyses_to_json()
|
diekhans/ga4gh-server | tests/end_to_end/test_oidc.py | Python | apache-2.0 | 3,844 | 0 | """
Performs a request via the client with OpenID Connect enabled,
with a local OP server.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import requests
import subprocess
from urlparse import urlparse
import lxml.html as html
import client
import server
import server_test
def getClientKey(server_url, username, password):
    """
    This function automatically performs the steps that the user would usually
    perform manually in order to obtain a token.

    :param server_url: base URL of the GA4GH server under test
    :param username: login name submitted to the OP's user/password form
    :param password: password matching ``username``
    :returns: the session token string extracted from the OP's result page
    """
    session = requests.session()
    # Load the login page (this request includes the redirect to the OP)
    loginPage = session.get("{}/".format(server_url), verify=False)
    # Extract the state data from the login form
    loginTree = html.fromstring(loginPage.text)
    inputTags = loginTree.iterdescendants('input')
    state = (tag for tag in inputTags if tag.name == 'state').next().value
    # Submit the form data to the OP for verification (if verification is
    # successful, the request will redirect to a page with the key)
    data = {
        'username': username,
        'password': password,
        'state': state
    }
    opLocation = urlparse(loginPage.url).netloc
    nextUrl = 'https://{}/user_pass/verify'.format(opLocation)
    keyPage = session.post(nextUrl, data, verify=False)
    # Extract the key from the page
    keyTree = html.fromstring(keyPage.text)
    tokenMarker = 'Session Token '  # the token always appears after this text
    tokenTag = (tag for tag in keyTree.iterdescendants()
                if tag.text_content().startswith(tokenMarker)).next()
    return tokenTag.text_content()[len(tokenMarker):]
class TestOidc(server_test.ServerTestClass):
    """
    Tests the oidc flow
    """
    @classmethod
    def otherSetup(cls):
        # Opaque id of the simulated variant set used by all requests below.
        cls.simulatedVariantSetId = "c2ltdWxhdGVkRGF0YXNldDA6c2ltVnMw"
        # Self-signed certs are used in the test setup; silence TLS warnings.
        requests.packages.urllib3.disable_warnings()
        cls.opServer = server.OidcOpServerForTesting()
        cls.opServer.start()

    @classmethod
    def otherTeardown(cls):
        cls.opServer.shutdown()

    @classmethod
    def getServer(cls):
        return server.Ga4ghServerForTesting(useOidc=True)

    def testOidc(self):
        # Happy path: a token obtained via the full login flow grants access.
        serverUrl = self.server.getUrl()
        key = getClientKey(serverUrl, 'diana', 'krall')
        test_client = client.ClientForTesting(
            serverUrl, flags="--key {}".format(key))
        self.runVariantsRequest(test_client)
        test_client.cleanup()

    def testOidcBadLoginPassword(self):
        # A wrong password means no token tag appears on the result page,
        # so the generator in getClientKey raises StopIteration.
        serverUrl = self.server.getUrl()
        with self.assertRaises(StopIteration):
            getClientKey(serverUrl, 'diana', 'krallxxx')

    def testOidcBadLoginKey(self):
        # A bogus key must make the client command fail.
        serverUrl = self.server.getUrl()
        test_client = client.ClientForTesting(
            serverUrl, flags="--key {}".format('ABC'))
        with self.assertRaises(subprocess.CalledProcessError):
            test_client.runCommand(
                "variants-search",
                "-s 0 -e 2 -V {}".format(self.simulatedVariantSetId),
                debugOnFail=False)
        test_client.cleanup()

    def testMultipleOidcClients(self):
        # Two users with independent tokens can query concurrently.
        serverUrl = self.server.getUrl()
        key = getClientKey(serverUrl, 'diana', 'krall')
        key2 = getClientKey(serverUrl, 'upper', 'crust')
        client1 = client.ClientForTesting(
            serverUrl, flags="--key {}".format(key))
        client2 = client.ClientForTesting(
            serverUrl, flags="--key {}".format(key2))
        self.runVariantsRequest(client1)
        self.runVariantsRequest(client2)
        client1.cleanup()
        client2.cleanup()

    def runVariantsRequest(self, client):
        # Helper: issue a small variants-search through the given client.
        self.runClientCmd(
            client,
            "variants-search -s 0 -e 2 -V {}".format(
                self.simulatedVariantSetId))
|
MangoMangoDevelopment/neptune | lib/genpy-0.5.8/src/genpy/generate_struct.py | Python | bsd-3-clause | 5,490 | 0.006193 | # Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from .base import SIMPLE_TYPES_DICT
_context_patterns = []
def add_pattern(p):
    """
    Record struct pattern that's been used for (de)serialization

    :param p: struct format string, ``str``
    """
    _context_patterns.append(p)
def clear_patterns():
    """
    Clear record of struct pattern that have been used for (de)serialization
    """
    # delete in place so existing references to the shared list stay valid
    del _context_patterns[:]
def get_patterns():
    """
    :returns: record of struct pattern that have been used for (de)serialization
    """
    # return a copy so callers cannot mutate the shared record
    return _context_patterns[:]
def compute_struct_pattern(types):
    """
    :param types: type names, ``[str]``
    :returns: format string for struct if types are all simple. Otherwise, return None, ``str``
    """
    if not types: #important to filter None and empty first
        return None
    try:
        return ''.join([SIMPLE_TYPES_DICT[t] for t in types])
    except KeyError:
        # a non-simple type has no struct code; narrowed from a bare
        # except so unrelated errors are no longer silently swallowed
        return None
def reduce_pattern(pattern):
    """
    Optimize the struct format pattern by run-length-collapsing repeated
    codes (e.g. ``'ffff'`` -> ``'4f'``).

    :param pattern: struct pattern, ``str``
    :returns: optimized struct pattern, ``str``
    """
    if not pattern or len(pattern) == 1 or '%' in pattern:
        return pattern
    prev = pattern[0]
    count = 1
    new_pattern = ''
    # digit characters belong to an existing count (e.g. '12s') and must not
    # be run-length collapsed themselves.
    # BUGFIX: was range(0, 9), which omitted '9' and wrongly collapsed
    # patterns such as '99s' into '29s'.
    nums = [str(i) for i in range(0, 10)]
    for c in pattern[1:]:
        if c == prev and not c in nums:
            count += 1
        else:
            if count > 1:
                new_pattern = new_pattern + str(count) + prev
            else:
                new_pattern = new_pattern + prev
            prev = c
            count = 1
    if count > 1:
        new_pattern = new_pattern + str(count) + c
    else:
        new_pattern = new_pattern + prev
    return new_pattern
## :param expr str: string python expression that is evaluated for serialization
## :returns str: python call to write value returned by expr to serialization buffer
def serialize(expr):
    """Return generated code that writes *expr*'s value to the buffer ``buff``."""
    return "buff.write(%s)"%expr
# int32 is very common due to length serialization, so it is special cased
def int32_pack(var):
    """
    :param var: variable name, ``str``
    :returns: struct packing code for an int32 (uses the shared precompiled
        ``_struct_I`` instead of an on-the-fly pattern)
    """
    return serialize('_struct_I.pack(%s)'%var)
# int32 is very common due to length serialization, so it is special cased
def int32_unpack(var, buff):
    """
    :param var: variable name, ``str``
    :returns: struct unpacking code for an int32
    """
    statement = '(%s,) = _struct_I.unpack(%s)'
    return statement % (var, buff)
#NOTE: '<' = little endian
def pack(pattern, vars):
    """
    create struct.pack call for when pattern is a string pattern

    :param pattern: pattern for pack, ``str``
    :param vars: name of variables to pack, ``str``
    """
    # - store pattern in context so the generator can emit the
    #   corresponding precompiled _struct_<pattern> object
    pattern = reduce_pattern(pattern)
    add_pattern(pattern)
    return serialize("_struct_%s.pack(%s)"%(pattern, vars))
def pack2(pattern, vars):
    """
    create struct.pack call for when pattern is the name of a variable

    :param pattern: name of variable storing string pattern, ``struct``
    :param vars: name of variables to pack, ``str``
    """
    return serialize("struct.pack(%s, %s)"%(pattern, vars))
def unpack(var, pattern, buff):
    """
    create struct.unpack call for when pattern is a string pattern

    :param var: name of variable to unpack, ``str``
    :param pattern: pattern for pack, ``str``
    :param buff: buffer to unpack from, ``str``
    :returns: python assignment statement performing the unpack, ``str``
    """
    # - store pattern in context
    pattern = reduce_pattern(pattern)
    add_pattern(pattern)
    return var + " = _struct_%s.unpack(%s)"%(pattern, buff)
def unpack2(var, pattern, buff):
    """
    Create struct.unpack call for when pattern refers to variable

    :param var: variable the stores the result of unpack call, ``str``
    :param pattern: name of variable that unpack will read from, ``str``
    :param buff: buffer that the unpack reads from, ``StringIO``
    """
    statement = "%s = struct.unpack(%s, %s)"
    return statement % (var, pattern, buff)
|
sztosz/invoices | config/wsgi.py | Python | bsd-3-clause | 1,618 | 0 | """
WSGI config for invoices project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os

from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise

# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")

# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()

# Use Whitenoise to serve static files
# See: https://whitenoise.readthedocs.org/
application = DjangoWhiteNoise(application)

# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
kalaspuff/tomodachi | tests/services/dummy_protobuf_service.py | Python | mit | 838 | 0 | import tomodachi
from tomodachi.discovery.dummy_registry import DummyRegistry
from tomodachi.envelope.protobuf_base import ProtobufBase
@tomodachi.service
class DummyService(tomodachi.Service):
    """Minimal protobuf-envelope service fixture recording lifecycle hooks."""
    name = "test_dummy_protobuf"
    discovery = [DummyRegistry]
    message_envelope = ProtobufBase
    # Deliberately invalid credentials/endpoints: the service must never
    # reach real AWS or AMQP infrastructure in tests.
    options = {
        "aws_sns_sqs": {
            "region_name": "eu-west-1",
            "aws_access_key_id": "XXXXXXXXX",
            "aws_secret_access_key": "XXXXXXXXX",
        },
        "amqp": {"port": 54321, "login": "invalid", "password": "invalid"},
    }
    # Flags flipped by the lifecycle hooks below so tests can observe them.
    start = False
    started = False
    stop = False

    async def _start_service(self) -> None:
        self.start = True

    async def _started_service(self) -> None:
        self.started = True

    async def _stop_service(self) -> None:
        self.stop = True
|
googleapis/python-channel | google/cloud/channel_v1/types/channel_partner_links.py | Python | apache-2.0 | 3,498 | 0.001144 | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.channel_v1.types import common
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.channel.v1",
manifest={
"ChannelPartnerLinkView",
"ChannelPartnerLinkState",
"ChannelPartnerLink",
},
)
class ChannelPartnerLinkView(proto.Enum):
    r"""The level of granularity the
    [ChannelPartnerLink][google.cloud.channel.v1.ChannelPartnerLink]
    will display.
    """
    # Numeric values mirror the proto enum; 0 is the proto default when unset.
    UNSPECIFIED = 0
    BASIC = 1
    FULL = 2
class ChannelPartnerLinkState(proto.Enum):
    r"""ChannelPartnerLinkState represents state of a channel partner
    link.
    """
    # Numeric values mirror the proto enum; 0 is the proto default when unset.
    CHANNEL_PARTNER_LINK_STATE_UNSPECIFIED = 0
    INVITED = 1
    ACTIVE = 2
    REVOKED = 3
    SUSPENDED = 4
class ChannelPartnerLink(proto.Message):
    r"""Entity representing a link between distributors and their
    indirect resellers in an n-tier resale channel.

    Attributes:
        name (str):
            Output only. Resource name for the channel partner link, in
            the format accounts/{account_id}/channelPartnerLinks/{id}.
        reseller_cloud_identity_id (str):
            Required. Cloud Identity ID of the linked
            reseller.
        link_state (google.cloud.channel_v1.types.ChannelPartnerLinkState):
            Required. State of the channel partner link.
        invite_link_uri (str):
            Output only. URI of the web page where
            partner accepts the link invitation.
        create_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. Timestamp of when the channel
            partner link is created.
        update_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. Timestamp of when the channel
            partner link is updated.
        public_id (str):
            Output only. Public identifier that a
            customer must use to generate a transfer token
            to move to this distributor-reseller
            combination.
        channel_partner_cloud_identity_info (google.cloud.channel_v1.types.CloudIdentityInfo):
            Output only. Cloud Identity info of the
            channel partner (IR).
    """

    name = proto.Field(proto.STRING, number=1,)
    reseller_cloud_identity_id = proto.Field(proto.STRING, number=2,)
    link_state = proto.Field(proto.ENUM, number=3, enum="ChannelPartnerLinkState",)
    invite_link_uri = proto.Field(proto.STRING, number=4,)
    create_time = proto.Field(proto.MESSAGE, number=5, message=timestamp_pb2.Timestamp,)
    update_time = proto.Field(proto.MESSAGE, number=6, message=timestamp_pb2.Timestamp,)
    public_id = proto.Field(proto.STRING, number=7,)
    channel_partner_cloud_identity_info = proto.Field(
        proto.MESSAGE, number=8, message=common.CloudIdentityInfo,
    )
__all__ = tuple(sorted(__protobuf__.manifest))
|
fhahn/django-endless-pagination | setup.py | Python | mit | 1,430 | 0.008392 | from distutils.core import setup
import os
root_dir = os.path.dirname(__file__)
if root_dir:
os.chdir(root_dir)
data_files = []
for dirpath, dirnames, filenames in os.walk('endless_pagination'):
for i, dirname in enumerate(dirnames):
if dirname.startswith('.'): del dirnames[i]
if '__init__.py' in filenames:
continue
elif filenames:
for f in filenames:
data_files.append(os.path.join(dirpath[len("endless_pagination")+1:], f))
version = "%s.%s" % __import__('endless_pagination').VERSION[:2]
setup(name='django-endless-pagination',
version=version,
description='Django pagination tools supporting ajax, multiple and lazy pagination, Twitter-style and Digg-style pagination.',
author='Francesco Banconi',
author_email='francesco.banconi@gmail.com',
url='http://code.google.com/p/django-endless-pagination/',
zip_safe=False,
packages=[
'en | dless_pagination',
'endless_pagination.templatetags',
],
package_data={'endless_pagination': data_files},
c | lassifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Utilities'
],
)
|
rs2/pandas | pandas/tests/arrays/period/test_constructors.py | Python | bsd-3-clause | 3,116 | 0.000963 | import numpy as np
import pytest
from pandas._libs.tslibs import iNaT
from pandas._libs.tslibs.period import IncompatibleFrequency
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import (
PeriodArray,
period_array,
)
@pytest.mark.parametrize(
    "data, freq, expected",
    [
        ([pd.Period("2017", "D")], None, [17167]),
        ([pd.Period("2017", "D")], "D", [17167]),
        ([2017], "D", [17167]),
        (["2017"], "D", [17167]),
        ([pd.Period("2017", "D")], pd.tseries.offsets.Day(), [17167]),
        ([pd.Period("2017", "D"), None], None, [17167, iNaT]),
        (pd.Series(pd.date_range("2017", periods=3)), None, [17167, 17168, 17169]),
        (pd.date_range("2017", periods=3), None, [17167, 17168, 17169]),
        (pd.period_range("2017", periods=4, freq="Q"), None, [188, 189, 190, 191]),
    ],
)
def test_period_array_ok(data, freq, expected):
    # `expected` holds the int64 ordinals the input converts to
    # (17167 = 2017-01-01 in days since the 1970-01-01 epoch).
    result = period_array(data, freq=freq).asi8
    expected = np.asarray(expected, dtype=np.int64)
    tm.assert_numpy_array_equal(result, expected)
def test_period_array_readonly_object():
    # https://github.com/pandas-dev/pandas/issues/25403
    # period_array / Series / DataFrame must accept a read-only object array.
    pa = period_array([pd.Period("2019-01-01")])
    arr = np.asarray(pa, dtype="object")
    arr.setflags(write=False)  # make the ndarray read-only

    result = period_array(arr)
    tm.assert_period_array_equal(result, pa)

    result = pd.Series(arr)
    tm.assert_series_equal(result, pd.Series(pa))

    result = pd.DataFrame({"A": arr})
    tm.assert_frame_equal(result, pd.DataFrame({"A": pa}))
def test_from_datetime64_freq_changes():
    # https://github.com/pandas-dev/pandas/issues/23438
    # Daily datetimes converted at monthly freq collapse to the same period.
    arr = pd.date_range("2017", periods=3, freq="D")
    result = PeriodArray._from_datetime64(arr, freq="M")
    expected = period_array(["2017-01-01", "2017-01-01", "2017-01-01"], freq="M")
    tm.assert_period_array_equal(result, expected)
@pytest.mark.parametrize(
    "data, freq, msg",
    [
        (
            [pd.Period("2017", "D"), pd.Period("2017", "A")],
            None,
            "Input has different freq",
        ),
        ([pd.Period("2017", "D")], "A", "Input has different freq"),
    ],
)
def test_period_array_raises(data, freq, msg):
    # Mixed or mismatched frequencies must raise IncompatibleFrequency.
    with pytest.raises(IncompatibleFrequency, match=msg):
        period_array(data, freq)
def test_period_array_non_period_series_raies():
    # NOTE: "raies" is a historical typo kept for test-id stability.
    # A plain int Series has no period dtype, so construction must raise.
    ser = pd.Series([1, 2, 3])
    with pytest.raises(TypeError, match="dtype"):
        PeriodArray(ser, freq="D")
def test_period_array_freq_mismatch():
    # A freq argument disagreeing with the array's own freq must raise,
    # whether given as a string or as an offset object.
    arr = period_array(["2000", "2001"], freq="D")
    with pytest.raises(IncompatibleFrequency, match="freq"):
        PeriodArray(arr, freq="M")

    with pytest.raises(IncompatibleFrequency, match="freq"):
        PeriodArray(arr, freq=pd.tseries.offsets.MonthEnd())
def test_from_sequence_disallows_i8():
    # _from_sequence must reject raw int64 ordinals (array or list);
    # the expected error message contains the first ordinal.
    arr = period_array(["2000", "2001"], freq="D")

    msg = str(arr[0].ordinal)
    with pytest.raises(TypeError, match=msg):
        PeriodArray._from_sequence(arr.asi8, dtype=arr.dtype)

    with pytest.raises(TypeError, match=msg):
        PeriodArray._from_sequence(list(arr.asi8), dtype=arr.dtype)
|
kickstandproject/sarlacc | sarlacc/tests/asterisk/agi/test_say_number.py | Python | apache-2.0 | 2,994 | 0 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2013 PolyBeacon, Inc.
#
# Author: Paul Belanger <paul.belanger@polybeacon.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cStringIO
import mock
from sarlacc.tests.asterisk.agi import test
class TestCase(test.TestCase):
    """Tests for the AGI ``SAY NUMBER`` command wrapper.

    stdin is patched to simulate Asterisk's AGI response line; stdout is
    captured to verify the exact command the library writes out.
    """

    @mock.patch('sys.stdin', cStringIO.StringIO("200 result=-1"))
    def test_say_number_failure(self):
        # result=-1 signals failure from Asterisk.
        with mock.patch(
            'sys.stdout', new_callable=cStringIO.StringIO) as mock_stdout:
            res, dtmf = self.agi.say_number(string='2000')
            self.assertEqual(mock_stdout.getvalue(), 'SAY NUMBER 2000 ""\n')
        self.assertFalse(res)
        self.assertEqual(dtmf, '')

    @mock.patch('sys.stdin', cStringIO.StringIO("200 result=0"))
    def test_say_number_success(self):
        with mock.patch(
            'sys.stdout', new_callable=cStringIO.StringIO) as mock_stdout:
            res, dtmf = self.agi.say_number(string='25000')
            self.assertEqual(mock_stdout.getvalue(), 'SAY NUMBER 25000 ""\n')
        self.assertTrue(res)
        self.assertEqual(dtmf, '')

    @mock.patch('sys.stdin', cStringIO.StringIO("200 result=0"))
    def test_say_number_with_female_gender(self):
        with mock.patch(
            'sys.stdout', new_callable=cStringIO.StringIO) as mock_stdout:
            res, dtmf = self.agi.say_number(string='25000', gender=False)
            self.assertEqual(mock_stdout.getvalue(), 'SAY NUMBER 25000 "" f\n')
        self.assertTrue(res)
        self.assertEqual(dtmf, '')

    @mock.patch('sys.stdin', cStringIO.StringIO("200 result=0"))
    def test_say_number_with_male_gender(self):
        with mock.patch(
            'sys.stdout', new_callable=cStringIO.StringIO) as mock_stdout:
            res, dtmf = self.agi.say_number(string='25000', gender=True)
            self.assertEqual(mock_stdout.getvalue(), 'SAY NUMBER 25000 "" m\n')
        self.assertTrue(res)
        self.assertEqual(dtmf, '')

    @mock.patch('sys.stdin', cStringIO.StringIO("200 result=49"))
    def test_say_number_digit_pressed(self):
        # result=49 is the ASCII code of '1': the caller pressed digit 1.
        with mock.patch(
            'sys.stdout', new_callable=cStringIO.StringIO) as mock_stdout:
            res, dtmf = self.agi.say_number(string='5000', digits='1234')
            self.assertEqual(
                mock_stdout.getvalue(), 'SAY NUMBER 5000 "1234"\n'
            )
        self.assertTrue(res)
        self.assertEqual(dtmf, '1')
|
jorsea/vertical-ngo | framework_agreement_requisition/model/purchase.py | Python | agpl-3.0 | 4,603 | 0 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Nicolas Bessi
# Copyright 2013, 2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import api
from openerp.osv import orm, fields
SELECTED_STATE = ('agreement_selected', 'Agreement selected')
AGR_SELECT = 'agreement_selected'
class purchase_order(orm.Model):
    """Add framework-agreement workflow behavior to purchase orders."""
    _inherit = "purchase.order"

    _columns = {
        'for_agreement': fields.boolean('For Framework Agreement'),
        # NOTE(review): "valitidy" is a typo in these user-facing labels;
        # kept as-is because translations key on the source string.
        'agreement_expected_date': fields.date('LTA expected valitidy period'),
        'agreement_promised_date': fields.date('LTA promised valitidy period'),
    }

    def __init__(self, pool, cr):
        """Add a new state value using PO class property"""
        if SELECTED_STATE not in super(purchase_order, self).STATE_SELECTION:
            super(purchase_order, self).STATE_SELECTION.append(SELECTED_STATE)
        super(purchase_order, self).__init__(pool, cr)

    @api.cr_uid_id_context
    def select_agreement(self, cr, uid, agr_id, context=None):
        """Pass PO in state 'Agreement selected'"""
        if isinstance(agr_id, (list, tuple)):
            assert len(agr_id) == 1
            agr_id = agr_id[0]
        return self.signal_workflow(cr, uid, [agr_id], 'select_agreement',
                                    context=context)

    def po_tender_agreement_selected(self, cr, uid, ids, context=None):
        """Workflow function that writes state 'Agreement selected'"""
        return self.write(cr, uid, ids, {'state': AGR_SELECT},
                          context=context)
class purchase_order_line(orm.Model):
    """Add make_agreement function"""
    _inherit = "purchase.order.line"

    # The inherited SQL constraint is overridden with CHECK(true), which
    # always passes; the rule is enforced by the Python constraint below
    # instead (needed because agreement-backed lines are exempt).
    _sql_constraints = [(
        'quantity_bid',
        'CHECK(true)',
        'Selected quantity must be less or equal than the quantity in the bid'
    )]

    def _check_quantity_bid(self, cr, uid, ids, context=None):
        # Lines tied to a framework agreement are exempt from the check.
        for line in self.browse(cr, uid, ids, context=context):
            if line.order_id.framework_agreement_id:
                continue
            if (
                line.product_id.type == 'product' and
                not line.quantity_bid <= line.product_qty
            ):
                return False
        return True

    _constraints = [(
        _check_quantity_bid,
        'Selected quantity must be less or equal than the quantity in the bid',
        []
    )]

    def _agreement_data(self, cr, uid, po_line, origin, context=None):
        """Get agreement values from PO line

        :param po_line: Po line records

        :returns: agreement dict to be used by orm.Model.create
        """
        portfolio_model = self.pool['framework.agreement.portfolio']
        vals = {}
        vals['portfolio_id'] = portfolio_model.get_from_supplier(
            cr, uid, po_line.order_id.partner_id, context=context)[0]
        vals['product_id'] = po_line.product_id.id
        vals['quantity'] = po_line.product_qty
        vals['delay'] = po_line.product_id.seller_delay
        vals['origin'] = origin if origin else False
        return vals

    def make_agreement(self, cr, uid, line_id, origin, context=None):
        """ generate a draft framework agreement

        :returns: a record of LTA
        """
        agr_model = self.pool['framework.agreement']
        if isinstance(line_id, (list, tuple)):
            assert len(line_id) == 1
            line_id = line_id[0]
        current = self.browse(cr, uid, line_id, context=context)
        vals = self._agreement_data(cr, uid, current, origin, context=context)
        agr_id = agr_model.create(cr, uid, vals, context=context)
        return agr_model.browse(cr, uid, agr_id, context=context)
|
softtyphoon/tz | tools/17173/ymxk/复件 game_spider.py | Python | gpl-2.0 | 839 | 0.017271 |
from news_spider import news_spider
from video_spider import video_spider
if __name__ == "__main__":
print u'开始爬取咨询信息...'
# 设置资讯的存储位置,必须以 \\ 结尾,分为绝对路径和相对路径
# c:\资讯\\ C:\咨询 目录下存放 txt,c:\资讯\图片 目录下存放图片
# 咨询\ 程序当前文件夹下的 资讯 目录存储 txt, 里面的 图片目录存放图片
news_path = u'c:\资讯\\'
# 设置视频目录,同上
video_path = u'c:\视频\\'
print u'游戏资讯的存放路径是:' + news_path
a = news_spider(path = news_path)
a.run()
print u'开始爬取视频信息...'
print | u'视频信息的的存放路径是:' + video_path
a = video_spider(path = video_path)
| a.run()
pass
|
denys-duchier/Scolar | ZopeProducts/exUserFolder/AuthSources/pgAuthSourceAlt/pgAuthSource.py | Python | gpl-2.0 | 10,712 | 0.033701 | #
# Extensible User Folder
#
# Postgres Authentication Source for exUserFolder
#
# (C) Copyright 2000,2001 The Internet (Aust) Pty Ltd
# ACN: 082 081 472 ABN: 83 082 081 472
# All Rights Reserved
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# Author: Andrew Milton <akm@theinternet.com.au>
# $Id: pgAuthSource.py,v 1.1 2004/11/10 14:15:36 akm Exp $
#
# This class only authenticates users, it stores no properties.
#
import string,Acquisition,md5
from Globals import HTMLFile, MessageDialog, INSTANCE_HOME
from OFS.Folder import Folder
from Products.ZSQLMethods.SQL import SQL
from Products.exUserFolder.exUserFolder import exUserFolder
from Products.exUserFolder.Plugins import PluginRegister
try:
from crypt import crypt
except:
from Products.exUserFolder.fcrypt.fcrypt import crypt
def manage_addpgAuthSource(self, REQUEST):
	"""Create a pgAuthSource from the add-form REQUEST and attach it."""
	source = pgAuthSource(
		REQUEST['pgauth_connection'],
		REQUEST['pgauth_userTable'],
		REQUEST['pgauth_usernameColumn'],
		REQUEST['pgauth_passwordColumn'],
		REQUEST['pgauth_roleTable'],
		REQUEST['pgauth_roleColumn'],
	)
	self._setObject('pgAuthSource', source, None, None, 0)
	# Re-fetch through the container so the object is acquisition-wrapped.
	source = getattr(self, 'pgAuthSource')
	if hasattr(source, 'postInitialisation'):
		source.postInitialisation(REQUEST)
	self.currentAuthSource = source
	return ''
manage_addpgAuthSourceForm=HTMLFile('manage_addpgAuthSourceForm', globals())
manage_editpgAuthSourceForm=HTMLFile('manage_editpgAuthSourceForm', globals())
class pgAuthSource(Folder):
""" Authenticate Users against a Postgres Database """
meta_type='Authentication Source'
title='Advanced Postgresql Authentication'
icon ='misc_/exUserFolder/exUserFolderPlugin.gif'
manage_tabs=Acquisition.Acquired
manage_editForm=manage_editpgAuthSourceForm
#
# You can define this to go off and do the authentication instead of
# using the basic one inside the User Object
#
remoteAuthMethod=None
def __init__(self, connection, userTable, usernameColumn, passwordColumn,
roleTable, roleColumn):
self.id='pgAuthSource'
self.connection=connection
self.userTable=userTable
self.usernameColumn=usernameColumn
self.passwordColumn=passwordColumn
self.roleTable=roleTable
self.roleColumn=roleColumn
self.addSQLQueries()
def manage_editAuthSource(self, REQUEST):
""" Edit a Postgres Auth Source """
self.connection=REQUEST['pgauth_connection']
self.userTable=REQUEST['pgauth_userTable']
self.usernameColumn=REQUEST['pgauth_usernameColumn']
self.passwordColumn=REQUEST['pgauth_passwordColumn']
self.roleTable=REQUEST['pgauth_roleTable']
self.roleColumn=REQUEST['pgauth_roleColumn']
self.delSQLQueries()
self.addSQLQueries() # Re-add queries with new parameters
def createUser(self, username, password, roles):
""" Add A Username """
if type(roles) != type([]):
if roles:
roles=list(roles)
else:
roles=[]
secret=self.cryptPassword(username, password)
self.sqlInsertUser(username=username, password=secret)
for n in roles:
self.insertUserRole(username, n)
def insertUserRole(self, username, rolename):
""" Add User Role """
self.sqlInsertUserRole(username=username, rolename=rolename)
def deleteUserRoles(self, username):
""" Delete User Roles """
self.sqlDeleteUserRoles(username=username)
def updateUser(self, username, password, roles):
if type(roles) != type([]):
if roles:
roles=list(roles)
else:
roles=[]
# Don't change passwords if it's null
if password:
secret=self.cryptPassword(username, password)
self.sqlUpdateUserPassword(username=username, password=secret)
self.deleteUserRoles(username)
for n in roles:
self.insertUserRole(username, n)
def delSQLQueries(self):
sqllist=self.objectIds('Z SQL Method')
self.manage_delObjects(ids=sqllist)
def addSQLQueries(self):
sqlListUsers=SQL(
'sqlListUsers',
'List All Users',
self.connection,
'userTable=%s'%(self.userTable),
_sqlListUsers)
self._setObject('sqlListUsers', sqlListUsers)
sqlListOneUser=SQL(
'sqlListOneUser',
'List ONE User',
self.connection,
'userTable=%s usernameColumn=%s username:string'%(
self.userTable, self.usernameColumn),
_sqlListOneUser)
self._setObject('sqlListOneUser', sqlListOneUser)
sqlListUserRoles=SQL(
'sqlListUserRoles',
'List User Roles',
self.connection,
'roleTable=%s usernameColumn=%s username:string'%(
self.roleTable, self.usernameColumn),
_sqlListUserRoles)
self._setObject('sqlListUserRoles', sqlListUserRoles)
sqlDeleteOneUser=SQL(
'sqlDeleteOneUser',
'Delete One User',
self.connection,
'userTable=%s usernameColumn=%s username:string'%(
self.userTable,self.usernameColumn),
_sqlDeleteOneUser)
self._setObject('sqlDeleteOneUser', sqlDeleteOneUser)
sqlDeleteUserRoles=SQL(
'sqlDeleteUserRoles',
'Delete User Roles',
self.connection,
'roleTable=%s usernameColumn=%s username:string'%(
self.roleTable,self.usernameColumn),
_sqlDeleteUserRoles)
self._setObject('sqlDeleteUserRoles', sqlDelet | eUserRoles)
sqlInsertUser=SQL(
'sqlInsertUser',
'Insert One User',
self.connection,
'userTable=%s usernameColumn=%s passwordColumn=%s username:string password:string'%(
self.userTable, self.usernameColumn, self.passwordColumn),
_sqlInsertUser)
self._setObject('sqlInsertUser', sqlInsertUser)
sqlInsertUserRole=SQL(
'sqlInsertUserRole',
'Insert User Role',
self.connection,
'roleTabl | e=%s usernameColumn=%s roleColumn=%s username:string rolename:string'%(
self.roleTable, self.usernameColumn, self.roleColumn),
_sqlInsertUserRole)
self._setObject('sqlInsertUserRole', sqlInsertUserRole)
sqlUpdateUserPassword=SQL(
'sqlUpdateUserPassword',
'Update just the password',
self.connection,
'userTable=%s usernameColumn=%s passwordColumn=%s username:string password:string'%(self.userTable, self.usernameColumn, self.passwordColumn),
_sqlUpdateUserPassword)
self._setObject('sqlUpdateUserPassword', sqlUpdateUserPassword)
# Original cryptPassword function
def cryptPassword_old(self, username, password):
salt =username[:2]
secret = crypt(password, salt)
return secret
# Alternate cryptPassword function, returns md5 hash of the password
# def cryptPassword_old(self, username, password):
# passhash = md5.new(password)
# secret = passhash.hexdigest()
# return secret
# Alternate cryptPassword function, returns plain text of the password.
# def cryptPassword_old(self, username, password):
# return password
def deleteUsers(self, userids):
for uid in userids:
self.sqlDeleteUserRoles(username=uid)
self.sqlDeleteOneUser(username=uid)
def listUserNames(self):
"""Returns a real list of user names """
users = []
result=self.sqlListUsers()
for n in result:
username=sqlattr(n,self.usernameColumn)
users.append(username)
return users
def listUsers(self):
"""Returns a list of user names or [] if no users exist"""
users = []
result=self.sqlListUsers()
for n in result:
username=sqlattr(n,self.usernameColumn)
N={'username':username}
users.append(N)
return users
def listOneUser(self,username):
users = []
result=self.sqlListOneUser(username=username)
for n in result:
username=sqlattr(n,self.usernameColumn)
password=sqlattr(n,self.passwordColumn)
roles=s |
ant30/s3authbasic | s3authbasic/tests/test_views.py | Python | mit | 1,402 | 0 | from s3authbasic.testing import BaseAppTest, AUTH_ENVIRON
class ViewsTests(BaseAppTest):
    """End-to-end checks for path resolution of the S3-backed site."""

    def test_validpath(self):
        # Each valid path must return 200 and contain its marker snippet.
        cases = (
            ('/', 'home'),
            ('/index.html', 'home'),
            ('/level1', 'level 1'),
            ('/level1/', 'level 1'),
            ('/level1/index.html', 'level 1'),
            ('/level1/other.html', 'other'),
            ('/level1/level2', 'level 2'),
            ('/level1/level2/index.html', 'level 2'),
            ('/blue%20child', 'whitespace'),
            ('/blue%20child/index.html', 'whitespace'),
        )
        for path, snippet in cases:
            response = self.testapp.get(path, extra_environ=AUTH_ENVIRON,
                                        status=200)
            self.assertTrue(snippet in response.body)

    def test_not_validpath(self):
        # Unknown documents and directory misses must 404.
        for path in ('/other.html', '/index', '/level1/index', '/level3/'):
            self.testapp.get(path, extra_environ=AUTH_ENVIRON,
                             status=404)
class MimeTypeTests(BaseAppTest):
def test_mimetypes(self):
for (path, mimetype) in (
('/', 'text/html'),
| ('/index.html', 'text/html'),
('/example.jpeg', 'image/jpeg'),
):
result = self.testapp.get(path, extra_environ=AUTH_ENVIRON)
self.assertEqual(result.content_type, mi | metype)
|
levilucio/SyVOLT | mbeddr2C_MM/Contracts/HSimple_IsolatedLHS.py | Python | mit | 21,285 | 0.012074 | from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HSimple_IsolatedLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HSimple_IsolatedLHS.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HSimple_IsolatedLHS, self).__init__(name='HSimple_IsolatedLHS', num_nodes=0, edges=[])
# Add the edges
self.add_edges([])
# Set the graph attributes
self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
self["MT_constraint__"] = """#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
"""
self["name"] = """"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'Simple')
# Set the node attributes
# match class ImplementationModule() node
self.add_node()
self.vs[0]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[0]["MT_label__"] = """1"""
self.vs[0]["MT_dirty__"] = False
self.vs[0]["mm__"] = """MT_pre__ImplementationModule"""
self.vs[0]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
# match class Function() node
self.add_node()
self.vs[1]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[1]["MT_label__"] = """2"""
self.vs[1]["MT_dirty__"] = False
self.vs[1]["mm__"] = """MT_pre__Function"""
self.vs[1]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
# match class ComponentInstance() node
self.add_node()
self.vs[2]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[2]["MT_label__"] = """3"""
self.vs[2]["MT_dirty__"] = False
self.vs[2]["mm__"] = """MT_pre__ComponentInstance"""
self.vs[2]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
# match class InstanceConfiguration() node
self.add_node()
self.vs[3]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[3]["MT_label__"] = """4"""
self.vs[3]["MT_dirty__"] = False
self.vs[3]["mm__"] = """MT_pre__InstanceConfiguration"""
self.vs[3]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
# match class TestCase() node
self.add_node()
self.vs[4]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[4]["MT_label__"] = """5"""
self.vs[4]["MT_dirty__"] = False
self.vs[4]["mm__"] = """MT_pre__TestCase"""
self.vs[4]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
# match class StatementList() node
self.add_node()
self.vs[5]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[5]["MT_label__"] = """6"""
self.vs[5]["MT_dirty__"] = False
self.vs[5]["mm__"] = """MT_pre__StatementList"""
self.vs[5]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
# match class InitializeConfiguration() node
self.add_node()
self.vs[6]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node | shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#=============================================================================== |
return True
"""
self.vs[6]["MT_label__"] = """7"""
self.vs[6]["MT_dirty__"] = False
self.vs[6]["mm__"] = """MT_pre__InitializeConfiguration"""
self.vs[6]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
# match class StatementList() node
self.add_node()
self.vs[7]["MT_pre__attr1"] = """
#=============================================================================== |
RoyXiang/ehForwarderBot | config.sample.py | Python | gpl-3.0 | 1,192 | 0.000839 | # #### | ##########################
# Configs for EH Forwarder Bot
# ##############################
#
# Basic settings
# ----------------
#
# Master/Slave Channels
#
# Master channel:
# The channel that is mainly used
# from slave channels.
# Currently only 1 master channel is supported.
#
# Slave channels:
# Channels that are hosted on the server and being delivered
# to and from the master channel.
# You may have more than 1 slave channel.
#
master_channel = 'plugins.eh_telegram_master', 'TelegramChannel'
slave_channels = [('plugins.eh_wechat_slave', 'WeChatChannel')]
#
# Plugin specific settings
# --------------------------
# Plugin specific settings should be written below in the format of:
# `channel_name = {"key1": "value1", "key2": "value2"}`
# Please refer to docs of individual plugin for details
#
eh_telegram_master = {
"token": "12345678:QWFPGJLUYarstdheioZXCVBKM",
"admins": [0],
"bing_speech_api": ["3243f6a8885a308d313198a2e037073", "2b7e151628ae082b7e151628ae08"],
"baidu_speech_api": {
"app_id": 0,
"api_key": "3243f6a8885a308d313198a2e037073",
"secret_key": "2b7e151628ae082b7e151628ae08"
}
}
|
silly-wacky-3-town-toon/SOURCE-COD | toontown/pets/PetTraits.py | Python | apache-2.0 | 9,416 | 0.004142 | from toontown.toonbase.ToonPythonUtil import randFloat, normalDistrib, Enum
from toontown.distributed.PythonUtil import clampScalar
from toontown.toonbase import TTLocalizer, ToontownGlobals
import random, copy
TraitDivisor = 10000
def getTraitNames():
    """Return the trait names, computed once and cached on PetTraits."""
    if not hasattr(PetTraits, 'TraitNames'):
        PetTraits.TraitNames = [desc[0] for desc in PetTraits.TraitDescs]
    return PetTraits.TraitNames
def uniform(min, max, rng):
    """Uniformly-distributed sample in [min, max], driven by rng.random."""
    return randFloat(min, max, rng.random)
def gaussian(min, max, rng):
    """Bell-curve sample in [min, max] via normalDistrib, driven by rng.gauss."""
    return normalDistrib(min, max, rng.gauss)
class TraitDistribution:
    """Describes how a pet trait's value is distributed per safezone.

    Subclasses must define:
        TraitType: TraitTypes.INCREASING (higher value is better) or
            TraitTypes.DECREASING (lower is better).
        Sz2MinMax: dict mapping safezone id -> (min, max) trait value.
    """
    TraitQuality = Enum('VERY_BAD, BAD, AVERAGE, GOOD, VERY_GOOD')
    TraitTypes = Enum('INCREASING, DECREASING')
    # Per-safezone (min, max) value ranges; set by subclasses.
    Sz2MinMax = None
    # One of TraitTypes; set by subclasses.
    TraitType = None
    # Percentile cutoffs separating the quality buckets, per trait type.
    TraitCutoffs = {TraitTypes.INCREASING: {TraitQuality.VERY_BAD: 0.1,
     TraitQuality.BAD: 0.25,
     TraitQuality.GOOD: 0.75,
     TraitQuality.VERY_GOOD: 0.9},
     TraitTypes.DECREASING: {TraitQuality.VERY_BAD: 0.9,
     TraitQuality.BAD: 0.75,
     TraitQuality.GOOD: 0.25,
     TraitQuality.VERY_GOOD: 0.1}}

    def __init__(self, rndFunc = gaussian):
        # rndFunc(min, max, rng) draws a random trait value in [min, max].
        self.rndFunc = rndFunc
        # Lazily compute the global (min, max) across all safezones the
        # first time an instance is created; cached on the subclass.
        # NOTE(review): the 1.0/0.0 seeds assume trait ranges lie inside
        # [0, 1] -- confirm if ranges ever exceed that.
        if not hasattr(self.__class__, 'GlobalMinMax'):
            _min = 1.0
            _max = 0.0
            minMax = self.Sz2MinMax
            for sz in minMax:
                thisMin, thisMax = minMax[sz]
                _min = min(_min, thisMin)
                _max = max(_max, thisMax)
            self.__class__.GlobalMinMax = [_min, _max]

    def getRandValue(self, szId, rng = random):
        """Draw a random trait value appropriate for safezone szId."""
        min, max = self.getMinMax(szId)
        return self.rndFunc(min, max, rng)

    def getHigherIsBetter(self):
        """True when larger trait values are more desirable."""
        return self.TraitType == TraitDistribution.TraitTypes.INCREASING

    def getMinMax(self, szId):
        """Return the (min, max) value range for safezone szId."""
        return (self.Sz2MinMax[szId][0], self.Sz2MinMax[szId][1])

    def getGlobalMinMax(self):
        """Return the (min, max) value range across every safezone."""
        return (self.GlobalMinMax[0], self.GlobalMinMax[1])

    def _getTraitPercent(self, traitValue):
        # Normalize traitValue to [0, 1] within the global range, widening
        # the range when the value falls outside it.
        gMin, gMax = self.getGlobalMinMax()
        if traitValue < gMin:
            gMin = traitValue
        elif traitValue > gMax:
            gMax = traitValue
        return (traitValue - gMin) / (gMax - gMin)

    def getPercentile(self, traitValue):
        """Return a 'goodness' percentile in [0, 1]; higher is always better."""
        if self.TraitType is TraitDistribution.TraitTypes.INCREASING:
            return self._getTraitPercent(traitValue)
        else:
            return 1.0 - self._getTraitPercent(traitValue)

    def getQuality(self, traitValue):
        """Map traitValue onto a TraitQuality bucket via TraitCutoffs."""
        TraitQuality = TraitDistribution.TraitQuality
        TraitCutoffs = self.TraitCutoffs[self.TraitType]
        percent = self._getTraitPercent(traitValue)
        # For INCREASING traits low percentiles are bad; for DECREASING
        # traits the comparisons are mirrored.
        if self.TraitType is TraitDistribution.TraitTypes.INCREASING:
            if percent <= TraitCutoffs[TraitQuality.VERY_BAD]:
                return TraitQuality.VERY_BAD
            elif percent <= TraitCutoffs[TraitQuality.BAD]:
                return TraitQuality.BAD
            elif percent >= TraitCutoffs[TraitQuality.VERY_GOOD]:
                return TraitQuality.VERY_GOOD
            elif percent >= TraitCutoffs[TraitQuality.GOOD]:
                return TraitQuality.GOOD
            else:
                return TraitQuality.AVERAGE
        elif percent <= TraitCutoffs[TraitQuality.VERY_GOOD]:
            return TraitQuality.VERY_GOOD
        elif percent <= TraitCutoffs[TraitQuality.GOOD]:
            return TraitQuality.GOOD
        elif percent >= TraitCutoffs[TraitQuality.VERY_BAD]:
            return TraitQuality.VERY_BAD
        elif percent >= TraitCutoffs[TraitQuality.BAD]:
            return TraitQuality.BAD
        else:
            return TraitQuality.AVERAGE

    def getExtremeness(self, traitValue):
        """Return how far traitValue sits from the midpoint, in [0, 1]."""
        percent = self._getTraitPercent(traitValue)
        if percent < 0.5:
            howExtreme = (0.5 - percent) * 2.0
        else:
            howExtreme = (percent - 0.5) * 2.0
        return clampScalar(howExtreme, 0.0, 1.0)
class PetTraits:
class StdIncDistrib(TraitDistribution):
TraitType = TraitDistribution.TraitTypes.INCREASING
Sz2MinMax = {ToontownGlobals.ToontownCentral: (0.2, 0.65),
ToontownGlobals.DonaldsDock: (0.3, 0.7),
ToontownGlobals.DaisyGardens: (0.4, 0.75),
ToontownGlobals.MinniesMelodyland: (0.5, 0.8),
ToontownGlobals.TheBrrrgh: (0.6, 0.85),
ToontownGlobals.DonaldsDreamland: (0.7, 0.9)}
class StdDecDistrib(TraitDistribution):
TraitType = TraitDistribution.TraitTypes.DECREASING
Sz2MinMax = {ToontownGlobals.ToontownCentral: (0.35, 0.8),
ToontownGlobals.DonaldsDock: (0.3, 0.7),
ToontownGlobals.DaisyGardens: (0.25, 0.6),
ToontownGlobals.MinniesMelodyland: (0.2, 0.5),
ToontownGlobals.TheBrrrgh: (0.15, 0.4),
ToontownGlobals.DonaldsDreamland: (0.1, 0.3)}
class ForgetfulnessDistrib(TraitDistribution):
TraitType = TraitDistribution.TraitTypes.DECREASING
Sz2MinMax = {ToontownGlobals.ToontownCentral: (0.0, 1.0),
ToontownGlobals.DonaldsDock: (0.0, 0.9),
ToontownGlobals.DaisyGardens: (0.0, 0.8),
ToontownGlobals.MinniesMelodyland: (0.0, 0.7),
ToontownGlobals.TheBrrrgh: (0.0, 0.6),
ToontownGlobals.DonaldsDreamland: (0.0, 0.5)}
TraitDescs = (('forgetfulness', ForgetfulnessDistrib(), True),
('boredomThreshold', StdIncDistrib(), True),
('restlessnessThreshold', StdIncDistrib(), True),
('playfulnessThreshold', StdDecDistrib(), True),
('lonelinessThreshold', StdIncDistrib(), True),
('sadnessThreshold', StdIncDistrib(), True),
('fatigueThreshold', StdIncDistrib(), True),
('hungerThreshold', StdIncDistrib(), True),
('confusionThreshold', StdIncDistrib(), True),
('excitementThreshold', StdDecDistrib(), True),
('angerThreshold', StdIncDistrib(), True),
('surpriseThreshold', StdIncDistrib(), False),
('affectionThreshold', StdDecDistrib(), True))
NumTraits = len(TraitDescs)
class Trait:
def __init__(self, index, traitsObj, value = None):
self.name, distrib, self.hasWorth = PetTraits.TraitDescs[index]
if value is not None:
self.value = value
else:
szId = traitsObj.safeZoneId
self.value = distrib.getRandValue(szId, traitsObj.rng)
self.value = int(self.value * TraitDivisor) / float(Tr | aitDivisor)
self.higherIsBetter = distrib.getHigherIsBetter()
self.percentile = distrib.getPercentile(self.value)
self.quality = distrib.getQuality(self.value)
self.howExtreme = distrib.getExtremeness(self.value)
retur | n
def __repr__(self):
return 'Trait: %s, %s, %s, %s' % (self.name,
self.value,
TraitDistribution.TraitQuality.getString(self.quality),
self.howExtreme)
def __init__(self, traitSeed, safeZoneId, traitValueList = []):
self.traitSeed = traitSeed
self.safeZoneId = safeZoneId
self.rng = random.Random(self.traitSeed)
self.traits = {}
for i in xrange(len(PetTraits.TraitDescs)):
if i < len(traitValueList) and traitValueList[i] > 0.0:
trait = PetTraits.Trait(i, self, traitValueList[i])
else:
trait = PetTraits.Trait(i, self)
self.traits[trait.name] = trait
self.__dict__[trait.name] = trait.value
extremeTraits = []
for trait in self.traits.values():
if not trait.hasWorth:
continue
if trait.quality == TraitDistribution.TraitQuality.AVERAGE:
continue
i = 0
while i < len(extremeTraits) and extremeTraits[i].howExtreme > trait.howExtreme:
i += 1
extremeTraits.insert(i, trait)
self.extremeTraits = []
for trait in extremeTraits:
self.extremeTraits.append((trait.name, trait.quali |
szu-stu/ezFund | fund/apps.py | Python | apache-2.0 | 83 | 0 | from dja | ngo.apps import AppConfig
class FundConfig(AppConfig):
name = ' | fund'
|
python-dirbtuves/website | pylab/core/helpers/factories.py | Python | agpl-3.0 | 2,104 | 0.000951 | import datetime
import faker
class FakerFactoryBoyWrapper(object):
    """Expose faker providers as factory_boy-compatible callables.

    Attribute access returns a callable which, given the provider's
    arguments, produces a zero/one-argument function suitable for
    factory.LazyAttribute:

    >>> from factory import LazyAttribute
    >>> from pylab.core.helpers.factories import fake
    >>> LazyAttribute(fake.company()) # doctest: +ELLIPSIS
    <factory.declarations.LazyAttribute object at 0x...>
    """

    def __init__(self):
        self.faker = faker.Factory.create()

    def __getattr__(self, name):
        provider = getattr(self.faker, name)

        def make_lazy(*args, **kwargs):
            def evaluate(obj=None):  # pylint: disable=unused-argument
                return provider(*args, **kwargs)
            return evaluate

        return make_lazy
fake = FakerFactoryBoyWrapper() # pylint: disable=invalid-name
_now = datetime.datetime.utcnow()
def _get_timedelta_kwargs(**kwargs):
return {k: v for k, v in kwargs.items() if k in {
'days', 'seconds', 'microseconds', 'milliseconds', 'minutes', 'hours', 'weeks',
}}
def _get_datetime | _replace_kwargs(**kwargs):
return {k: v for k, v in kwargs.items() if k in {
'year', 'month', 'day', 'hour', 'minute', 'second', 'microsecond', 'tzi | nfo',
}}
def _get_datetime(dt, date=False, **kwargs):
    """Optionally reduce dt to a date, then apply any replace() overrides."""
    result = dt.date() if date else dt
    overrides = _get_datetime_replace_kwargs(**kwargs)
    return result.replace(**overrides) if overrides else result
def now():
    """Return a factory-boy callable yielding the module-load UTC datetime."""
    def func(obj=None): # pylint: disable=unused-argument
        return _now
    return func
def today():
    """Return a factory-boy callable yielding the module-load UTC date."""
    def func(obj=None): # pylint: disable=unused-argument
        return _now.date()
    return func
def future(**kwargs):
    """Factory callable for an instant *after* the module-load datetime.

    Timedelta-style kwargs shift the instant forward; the remaining
    kwargs (including ``date=True``) are forwarded to ``_get_datetime``.
    """
    offset = datetime.timedelta(**_get_timedelta_kwargs(**kwargs))
    def func(obj=None):  # pylint: disable=unused-argument
        return _get_datetime(_now + offset, **kwargs)
    return func
def past(**kwargs):
    """Factory callable for an instant *before* the module-load datetime.

    Timedelta-style kwargs shift the instant backward; the remaining
    kwargs (including ``date=True``) are forwarded to ``_get_datetime``.
    """
    offset = datetime.timedelta(**_get_timedelta_kwargs(**kwargs))
    def func(obj=None):  # pylint: disable=unused-argument
        return _get_datetime(_now - offset, **kwargs)
    return func
|
Lucas-Armand/genetic-algorithm | dev/9ºSemana/testes of speed.py | Python | gpl-3.0 | 3,255 | 0.036252 | # -*- coding: utf-8 -*-
import os
import csv
import random
import numpy as np
import timeit
import time as Time
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from itertools import product, combinations
class Block:
    """One ship block: reference point, a/b/c dimensions, weight and type.

    The short attribute names (p, a, b, c, w, t) are kept because other
    code in this script indexes blocks through them.
    """

    def __init__(self, point, a, b, c, weight, btype):
        self.p, self.a, self.b, self.c, self.w, self.t = (
            point, a, b, c, weight, btype)
def csv_read(name):
    """Read a semicolon-separated CSV file that uses comma decimal marks.

    The content is normalised (decimal ',' -> '.', field separator
    ';' -> ',') and parsed; each cell is converted to float when
    possible, otherwise kept as a string.

    :param name: path of the CSV file to read
    :returns: list of rows, each a list of float/str cells
    """
    # Parse in memory instead of round-tripping through a "temp.csv"
    # file in the working directory: the original temp file was
    # race-prone and leaked if an exception occurred before os.remove.
    with open(name, 'r') as arquivo:
        dados = arquivo.read()
    dados = dados.replace(',', '.').replace(';', ',')
    v = []
    for row in csv.reader(dados.splitlines()):
        parsed = []
        for cell in row:
            try:
                cell = float(cell)
            except ValueError:
                # Non-numeric cells stay as strings.
                pass
            parsed.append(cell)
        v.append(parsed)
    return v
def defineGeometry(name):
    """Build a {block_number: Block} mapping from the geometry CSV file.

    Row layout: number, a, b, c, x, y, z, weight, ..., block type (last).
    """
    blocks = {}
    for row in csv_read(name):
        number, a, b, c = row[0], row[1], row[2], row[3]
        point = [row[4], row[5], row[6]]
        blocks[number] = Block(point, a, b, c, row[7], row[-1])
    return blocks
bNumb=defineGeometry('GeometriaNavio.csv')
# Define vicinity
#deck
vicinity={1:[2]}
for i in range(2,16):
vicinity[i] = [i-1,i+1]
vicinity[16] = [15]
#side
vicinity[17] = [18,19]
vicinity[18] = [17,20]
for i in range(19,31):
v = i-1 if i%2==0 else i+1
vicinity[i] = [v,i-2,i+2]
vicinity[31] = [29,32]
vicinity[32] = [30,31]
#bott
vicinity[33] = [34,35]
vicinity[34] = [33,36]
for i in range(35,63):
v = i-1 if i%2==0 else i+1
vicinity[i] = [v,i-2,i+2]
vicinity[63] = [61,64]
vicinity[64] = [63,62]
#coff
vicinity[65] = [66]
for i in range(66,70):
vicinity[i] = [i-1,i+1]
vicinity[70] = [69]
alfa = 10
beta = 1
built = []
time = 0
append = built.append
def order(x): return vicinity[x]
def time(bNumb,vicinity,chromo):
t_time = Time.time()
alfa = 1
built = []
time = 0
append = built.append
def time_vector(x,y):
for i in y:
if i in built:
time = alfa
break
try:time
except: time = 10*alfa
append(x)
return time
|
vic = [vicinity[x] for x in chromo]
time = sum((time_vector(x,y) for x,y in zip(chromo,vic)))
return time
chromo = [44, 39, 56, 47, 49, 37, 42, 46, 51, 58, 60, 62, 52, 41, 35, 33, 50, 61, 54, 34, 59, 43, 48, 45, 55, 53, 38, 57, 64, 67, 68, 63, 40, 36, 21, 66, 22, 6, 20, 65, 18, 5, 17, 69, 28, 27, 70, 29, 1, 12, | 30, 13, 14, 26, 31, 24, 19, 2, 3, 4, 25, 11, 32, 10, 15, 16, 9, 23, 7, 8]
import cProfile
cProfile.run('time(bNumb,vicinity,chromo)')
##
##print timeit.timeit(setup='from __main__ import chromo;'+
## 'from __main__ import bNumb;'+
## 'from __main__ import time;'+
## 'from __main__ import vicinity '
## ,stmt='time(bNumb,vicinity,chromo)')
#print t.timeit(number = 1000000)
|
foobacca/django-multilingual-search | tests/testproject/translation.py | Python | mit | 321 | 0.003115 | # coding: utf-8
from __future__ import ab | solute_import, unicode_literals
from modeltranslation.translator import translator, TranslationOptions
from .models import Document
class DocumentTranslationOptions(TranslationOptions):
fields = ('title', 'text')
translator.register(Document, DocumentTranslationOpt | ions)
|
toofar/qutebrowser | tests/unit/scripts/test_check_coverage.py | Python | gpl-3.0 | 6,650 | 0 | #!/usr/bin/env python3
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2018 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
import sys
import os.path
import pytest
from scripts.dev import check_coverage
pytest_plugins = 'pytester'
pytestmark = [pytest.mark.linux, pytest.mark.not_frozen]
class CovtestHelper:
"""Helper object for covtest fixture.
Attributes:
_testdir: The testdir fixture from pytest.
_monkeypatch: The monkeypatch fixture from pytest.
"""
def __init__(self, testdir, monkeypatch):
self._testdir = testdir
self._monkeypatch = monkeypatch
def makefile(self, code):
"""Generate a module.py for the given code."""
self._testdir.makepyfile(module=code)
def run(self):
"""Run pytest with coverage for the given module.py."""
coveragerc = str(self._testdir.tmpdir / 'coveragerc')
self._monkeypatch.delenv('PYTEST_ADDOPTS', raising=False)
return self._testdir.runpytest('--cov=module',
'--cov-config={}'.format(coveragerc),
'--cov-report=xml',
plugins=['no:faulthandler'])
def check(self, perfect_files=None):
"""Run check_coverage.py and run its return value."""
coverage_file = self._testdir.tmpdir / 'coverage.xml'
if perfect_files is None:
perfect_files = [(None, 'module.py')]
argv = [sys.argv[0]]
self._monkeypatch.setattr(check_coverage.sys, 'argv', argv)
with self._testdir.tmpdir.as_cwd():
with coverage_file.open(encoding='utf-8') as f:
return check_coverage.check(f, perfect_files=perfect_files)
def check_skipped(self, args, reason):
"""Run check_coverage.py and make sure it's skipped."""
argv = [sys.argv[0]] + list(args)
self._monkeypatch.setattr(check_coverage.sys, 'argv', argv)
with pytest.raises(check_coverage.Skipped) as excinfo:
return check_coverage.check(None, perfect_files=[])
assert excinfo.value.reason == reason
@pytest.fixture
def covtest(testdir, monkeypatch):
"""Fixture which provides a coveragerc and a test to call module.func."""
testdir.makefile(ext='', coveragerc="""
[run]
branch=True
""")
testdir.makepyfile(test_module="""
from module import func
def test_module():
func()
""")
return CovtestHelper(testdir, monkeypatch)
def test_tested_no_branches(covtest):
covtest.makefile("""
def func():
pass
""")
covtest.run()
assert covtest.check() == []
def test_tested_with_branches(covtest):
covtest.makefile("""
def func2(arg):
if arg:
pass
else:
pass
def func():
func2(True)
func2(False)
""")
covtest.r | un()
assert covtest.check() == []
def test_untested(covtest):
covtest.makefile("""
def func():
pass
def untested():
pass
""")
covtest.run()
expected = check_coverage.Message(
ch | eck_coverage.MsgType.insufficent_coverage,
'module.py',
'module.py has 75.00% line and 100.00% branch coverage!')
assert covtest.check() == [expected]
def test_untested_floats(covtest):
"""Make sure we don't report 58.330000000000005% coverage."""
covtest.makefile("""
def func():
pass
def untested():
pass
def untested2():
pass
def untested3():
pass
def untested4():
pass
def untested5():
pass
""")
covtest.run()
expected = check_coverage.Message(
check_coverage.MsgType.insufficent_coverage,
'module.py',
'module.py has 58.33% line and 100.00% branch coverage!')
assert covtest.check() == [expected]
def test_untested_branches(covtest):
covtest.makefile("""
def func2(arg):
if arg:
pass
else:
pass
def func():
func2(True)
""")
covtest.run()
expected = check_coverage.Message(
check_coverage.MsgType.insufficent_coverage,
'module.py',
'module.py has 100.00% line and 50.00% branch coverage!')
assert covtest.check() == [expected]
def test_tested_unlisted(covtest):
covtest.makefile("""
def func():
pass
""")
covtest.run()
expected = check_coverage.Message(
check_coverage.MsgType.perfect_file,
'module.py',
'module.py has 100% coverage but is not in perfect_files!')
assert covtest.check(perfect_files=[]) == [expected]
@pytest.mark.parametrize('args, reason', [
(['-k', 'foo'], "because -k is given."),
(['-m', 'foo'], "because -m is given."),
(['--lf'], "because --lf is given."),
(['blah', '-m', 'foo'], "because -m is given."),
(['tests/foo'], "because there is nothing to check."),
])
def test_skipped_args(covtest, args, reason):
covtest.check_skipped(args, reason)
@pytest.mark.fake_os('windows')
def test_skipped_non_linux(covtest):
covtest.check_skipped([], "on non-Linux system.")
def _generate_files():
"""Get filenames from WHITELISTED_/PERFECT_FILES."""
for src_file in check_coverage.WHITELISTED_FILES:
yield os.path.join('qutebrowser', src_file)
for test_file, src_file in check_coverage.PERFECT_FILES:
if test_file is not None:
yield test_file
yield os.path.join('qutebrowser', src_file)
@pytest.mark.parametrize('filename', list(_generate_files()))
def test_files_exist(filename):
basedir = os.path.join(os.path.dirname(check_coverage.__file__),
os.pardir, os.pardir)
assert os.path.exists(os.path.join(basedir, filename))
|
hobson/pug-nlp | pug/nlp/segmentation.py | Python | mit | 13,806 | 0.004346 | import os
import re
import string
from itertools import chain
from .detector_morse import Detector
from .detector_morse import slurp
# from .penn_treebank_tokenizer import word_tokenize
import nlup
from pug.nlp.constant import DATA_PATH
from pug.nlp.util import generate_files
# regex namespace only conflicts with regex kwarg in Tokenizer constructur
from pug.nlp.regex import CRE_TOKEN, RE_NONWORD
def list_ngrams(token_list, n=1, join=' '):
"""Return a list of n-tuples, one for each possible sequence of n items in the token_list
Arguments:
join (bool or str): if str, then join ngrom tuples on it before returning
True is equivalent to join=' '
default = True
See: http://stackoverflow.com/a/30609050/623735
>>> list_ngrams('goodbye cruel world'.split(), join=False)
[('goodbye',), ('cruel',), ('world',)]
>>> list_ngrams('goodbye cruel world'.split(), 2, join=False)
[('goodbye', 'cruel'), ('cruel', 'world')]
"""
join = ' ' if join is True else join
if isinstance(join, basestring):
return [join.join(ng) for ng in list_ngrams(token_list, n=n, join=False)]
return zip(*[token_list[i:] for i in range(n)])
def list_ngram_range(token_list, *args, **kwargs):
"""Return a list of n-tuples, one for each possible sequence of n items in the token_list
Arguments:
join (bool or str): if str, then join ngrom tuples on it before returning
True is equivalent to join=' '
default = True
>>> list_ngram_range('goodbye cruel world'.split(), 0, 2, join=False)
[('goodbye',), ('cruel',), ('world',), ('goodbye', 'cruel'), ('cruel', 'world')]
>>> list_ngram_range('goodbye cruel world'.split(), 2, join=False)
[('goodbye',), ('cruel',), ('world',), ('goodbye', 'cruel'), ('cruel', 'world')]
>>> list_ngram_range('goodbye cruel world'.split(), 0, 2, join='|')
['goodbye', 'cruel', 'world', 'goodbye|cruel', 'cruel|world']
>>> list_ngram_range('goodbye cruel world'.split(), 0, 2, join=True)
['goodbye', 'cruel', 'world', 'goodbye cruel', 'cruel world']
"""
m, n = (args if len(args) > 1 else ((0, args[0]) if args else (0, 1)))
join = args[2] if len(args) > 2 else kwargs.pop('join', True)
return list(chain(*(list_ngrams(token_list, i + 1, join=join) for i in range(0, n))))
def generate_sentences(text='', train_path=None, case_sensitive=True, epochs=20, classifier=nlup.BinaryAveragedPerceptron, **kwargs):
"""Generate sentences from a sequence of characters (text)
Thin wrapper for Kyle Gorman's "DetectorMorse" module
Arguments:
case_sensitive (int): whether to consider case to make decisions about sentence boundaries
epochs (int): number of epochs (iterations for classifier training)
"""
if train_path:
generate_sentences.detector = Detector(slurp(train_path), epochs=epochs, nocase=not case_sensitive)
# generate_sentences.detector = SentenceDetector(text=text, nocase=not case_sensitive, epochs=epochs, classifier=classifier)
return iter(generate_sentences.detector.segments(text))
generate_sentences.detector = nlup.decorators.IO(Detector.load)(os.path.join(DATA_PATH, 'wsj_detector_morse_model.json.gz'))
def str_strip(s, strip_chars=string.punctuation + ' \t\n\r'):
return s.strip(strip_chars)
def str_lower(s):
return s.lower()
def to_ascii(s, filler='-'):
if not s:
return ''
if not isinstance(s, basestring): # e.g. np.nan
return to_ascii(repr(s))
try:
return s.encode('utf8')
except:
return ''.join(c if c < chr(128) else filler for c in s if c)
stringify = to_ascii
def passthrough(s):
return s
class Tokenizer(object):
"""Callable and iterable class that yields substrings split on spaces or other configurable delimitters.
For both __init__ and __call__, doc is the first arg.
TODO: All args and functionality of __init__() and __call__() should be the same.
FIXME: Implement the `nltk.tokenize.TokenizerI` interface
Is it at all pythonic to make a class callable and iterable?
Is it pythonic to have to instantiate a TokenizerI instance and then call that instance's `tokenize` method?
>>> abc = (chr(ord('a') + (i % 26)) for i in xrange(1000))
>>> tokenize = Tokenizer(ngrams=5)
>>> ans = list(tokenize(' '.join(abc)))
>>> ans[:7]
['a', 'b', 'c', 'd', 'e', 'f', 'g']
>>> ans[1000:1005]
['a b', 'b c', 'c d', 'd e', 'e f']
>>> ans[1999:2004]
['a b c', 'b c d', 'c d e', 'd e f', 'e f g']
>>> tokenize = Tokenizer(stem='Porter')
>>> doc = "Here're some stemmable words provided to you for your stemming pleasure."
>>> sorted(set(tokenize(doc)) - set(Tokenizer(doc, stem='Lancaster')))
[u"Here'r", u'pleasur', u'some', u'stemmabl', u'your']
>>> sorted(set(Tokenizer(doc, stem='WordNet')) - set(Tokenizer(doc, stem='Lancaster')))
["Here're", 'pleasure', 'provided', 'some', 'stemmable', 'stemming', 'your']
"""
def __init__(self, doc=None, regex=CRE_TOKEN, strip=True, nonwords=False, nonwords_set=None, nonwords_regex=RE_NONWORD,
lower=None, stem=None, ngrams=1):
# specific set of characters to strip
self.strip_chars = None
if isinstance(strip, basestring):
self.strip_chars = strip
# strip_chars takes care of the stripping config, so no need for strip function anymore
| self.strip = None
elif strip is True:
self.strip_chars = '-_*`()"' + '"'
strip = strip or None
# strip whitespace, overrides strip() method
self.strip = strip if callable(strip) else (str_strip if strip else None)
self.doc = to_ascii(doc)
self.regex = regex
if isinstance(self.regex, basestring):
self.regex = re.compile(self.regex)
self.nonwords = nonwords # whether to use the default REGEX for nonwords
self.nonwords_set = nonwords_set or set()
self.nonwords_regex = nonwords_regex
self.lower = lower if callable(lower) else (str_lower if lower else None)
self.stemmer_name, self.stem = 'passthrough', passthrough # stem can be a callable Stemmer instance or just a function
self.ngrams = ngrams or 1 # ngram degree, numger of ngrams per token
if isinstance(self.nonwords_regex, basestring):
self.nonwords_regex = re.compile(self.nonwords_regex)
elif self.nonwords:
try:
self.nonwords_set = set(self.nonwords)
except TypeError:
self.nonwords_set = set(['None', 'none', 'and', 'but'])
# if a set of nonwords has been provided dont use the internal nonwords REGEX?
self.nonwords = not bool(self.nonwords)
def __call__(self, doc):
"""Lazily tokenize a new document (tokens aren't generated until the class instance is iterated)
>>> list(Tokenizer()('new string to parse'))
['new', 'string', 'to', 'parse']
"""
# tokenization doesn't happen until you try to iterate through the Tokenizer instance or class
self.doc = to_ascii(doc)
# need to return self so that this will work: Tokenizer()('doc (str) to parse even though default doc is None')
return self
# to conform to this part of the nltk.tokenize.TokenizerI interface
tokenize = __call__
def __reduce__(self):
"""Unpickling constructor and args so that pickling can be done efficiently without any bound methods, etc"""
return (Tokenizer, (None, self.regex, self.strip, self.nonwords, self.nonwords_set, self.nonwords_regex,
self.lower, self.stemmer_name, self.ngrams))
def span_tokenize(self, s):
"""Identify the tokens using integer offsets `(start_i, end_i)` rather than copying them to a new sequence
The sequence of tokens (strings) can be generated with
`s[start_i:end_i] for start_i, end_i in span_tokenize(s)`
Returns:
generator of 2-tuples of ints, like ((int, int) for token in s)
"""
return
# raise NotImplementedE |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.