repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, may be null) |
|---|---|---|---|---|
saurabh6790/tru_app_back | refs/heads/master | patches/november_2012/leave_application_cleanup.py | 30 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
import webnotes
def execute():
webnotes.reload_doc("core", "doctype", "doctype")
webnotes.clear_perms("Leave Application")
webnotes.reload_doc("hr", "doctype", "leave_application")
webnotes.conn.sql("""update `tabLeave Application` set status='Approved'
where docstatus=1""")
webnotes.conn.sql("""update `tabLeave Application` set status='Open'
where docstatus=0""")
|
dardevelin/rhythmbox-gnome-fork | refs/heads/master | plugins/im-status/im-status.py | 3 |
# coding: utf-8
# vim: set et sw=2:
#
# Copyright (C) 2007-2008 - Vincent Untz
# Copyright (C) 2012 - Nirbheek Chauhan <nirbheek@gentoo.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# The Rhythmbox authors hereby grant permission for non-GPL compatible
# GStreamer plugins to be used and distributed together with GStreamer
# and Rhythmbox. This permission is above and beyond the permissions granted
# by the GPL license by which Rhythmbox is covered. If you modify this code
# you may extend this exception to your version of the code, but you are not
# obligated to do so. If you do not wish to do so, delete this exception
# statement from your version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
import rb
import gi
from gi.repository import Gio, GLib, GObject, Peas
from gi.repository import RB
import gettext
gettext.install('rhythmbox', RB.locale_dir(), unicode=True)
NORMAL_SONG_ARTIST = 'artist'
NORMAL_SONG_TITLE = 'title'
NORMAL_SONG_ALBUM = 'album'
STREAM_SONG_ARTIST = 'rb:stream-song-artist'
STREAM_SONG_TITLE = 'rb:stream-song-title'
STREAM_SONG_ALBUM = 'rb:stream-song-album'
PROPERTIES_IFACE_NAME = 'org.freedesktop.DBus.Properties'
MC5_BUS_NAME = 'org.freedesktop.Telepathy.MissionControl5'
MC5_AM_OBJ_PATH = '/org/freedesktop/Telepathy/AccountManager'
MC5_AM_IFACE_NAME = 'org.freedesktop.Telepathy.AccountManager'
MC5_ACCT_IFACE_NAME = 'org.freedesktop.Telepathy.Account'
PURPLE_BUS_NAME = 'im.pidgin.purple.PurpleService'
PURPLE_OBJ_PATH = '/im/pidgin/purple/PurpleObject'
PURPLE_IFACE_NAME = 'im.pidgin.purple.PurpleInterface'
class IMStatusPlugin (GObject.Object, Peas.Activatable):
__gtype_name__ = 'IMStatusPlugin'
object = GObject.property(type=GObject.Object)
def __init__ (self):
GObject.Object.__init__ (self)
def _init_dbus_proxies(self):
self.proxies = {}
bus_type = Gio.BusType.SESSION
flags = 0
iface_info = None
# Creating proxies doesn't do any blocking I/O, and never fails
self.proxies["purple"] = Gio.DBusProxy.new_for_bus_sync(bus_type, flags, iface_info,
PURPLE_BUS_NAME, PURPLE_OBJ_PATH, PURPLE_IFACE_NAME, None)
self.proxies["mc5_props"] = Gio.DBusProxy.new_for_bus_sync(bus_type, flags, iface_info,
MC5_BUS_NAME, MC5_AM_OBJ_PATH, PROPERTIES_IFACE_NAME, None)
def do_activate (self):
shell = self.object
sp = shell.props.shell_player
self.psc_id = sp.connect ('playing-song-changed',
self.playing_entry_changed)
self.pc_id = sp.connect ('playing-changed',
self.playing_changed)
self.pspc_id = sp.connect ('playing-song-property-changed',
self.playing_song_property_changed)
self.current_entry = None
self.current_artist = None
self.current_title = None
self.current_album = None
self._init_dbus_proxies ()
self.save_status ()
if sp.get_playing ():
self.set_entry (sp.get_playing_entry ())
def do_deactivate (self):
shell = self.object
sp = shell.props.shell_player
sp.disconnect (self.psc_id)
sp.disconnect (self.pc_id)
sp.disconnect (self.pspc_id)
if self.current_entry is not None:
self.restore_status ()
def playing_changed (self, sp, playing):
if playing:
self.set_entry (sp.get_playing_entry ())
else:
self.current_entry = None
self.restore_status ()
def playing_entry_changed (self, sp, entry):
if sp.get_playing ():
self.set_entry (entry)
def playing_song_property_changed (self, sp, uri, property, old, new):
relevant = False
if sp.get_playing () and property in (NORMAL_SONG_ARTIST, STREAM_SONG_ARTIST):
self.current_artist = new
relevant = True
elif sp.get_playing () and property in (NORMAL_SONG_TITLE, STREAM_SONG_TITLE):
self.current_title = new
relevant = True
elif sp.get_playing () and property in (NORMAL_SONG_ALBUM, STREAM_SONG_ALBUM):
self.current_album = new
relevant = True
if relevant:
self.set_status ()
def set_entry (self, entry):
if rb.entry_equal(entry, self.current_entry):
return
if self.current_entry == None:
self.save_status ()
self.current_entry = entry
if entry is None:
self.restore_status ()
return
self.set_status_from_entry ()
def set_status_from_entry (self):
shell = self.object
db = shell.get_property ("db")
self.current_artist = self.current_entry.get_string(RB.RhythmDBPropType.ARTIST)
self.current_title = self.current_entry.get_string(RB.RhythmDBPropType.TITLE)
self.current_album = self.current_entry.get_string(RB.RhythmDBPropType.ALBUM)
if self.current_entry.get_entry_type().props.category == RB.RhythmDBEntryCategory.STREAM:
if not self.current_artist:
self.current_artist = db.entry_request_extra_metadata (self.current_entry, STREAM_SONG_ARTIST)
if not self.current_title:
self.current_title = db.entry_request_extra_metadata (self.current_entry, STREAM_SONG_TITLE)
if not self.current_album:
self.current_album = db.entry_request_extra_metadata (self.current_entry, STREAM_SONG_ALBUM)
self.set_status ()
def set_status (self):
subs = {
'artist': unicode (self.current_artist, encoding='utf-8'),
'album': unicode (self.current_album, encoding='utf-8'),
'title': unicode (self.current_title, encoding='utf-8')
}
if self.current_artist:
if self.current_title:
# Translators: do not translate %(artist)s or %(title)s, they are
# string substitution markers (like %s) for the artist and title of
# the current playing song. They can be reordered if necessary.
new_status = _(u"♫ %(artist)s - %(title)s ♫") % subs
elif self.current_album:
# Translators: do not translate %(artist)s or %(album)s, they are
# string substitution markers (like %s) for the artist and album name
# of the current playing song. They can be reordered if necessary.
new_status = _(u"♫ %(artist)s - %(album)s ♫") % subs
elif self.current_album:
# Translators: do not translate %(album)s, it is a string substitution
# marker (like %s) for the album name of the current playing song.
new_status = _(u"♫ %(album)s ♫") % subs
elif self.current_title:
# Translators: do not translate %(title)s, it is a string substitution
# marker (like %s) for the title of the current playing song.
new_status = _(u"♫ %(title)s ♫") % subs
else:
new_status = _(u"♫ Listening to music... ♫")
self.set_mc5_status (new_status)
self.set_purple_status (new_status)
def save_status (self):
self.saved_mc5 = self.get_mc5_status ()
self.saved_purple = self.get_purple_status ()
def restore_status (self):
if self.saved_mc5 != None:
self.set_mc5_status (self.saved_mc5)
if self.saved_purple != None:
self.set_purple_status (self.saved_purple)
def set_mc5_status (self, new_status):
try:
proxy = self.proxies["mc5_props"]
for acct_obj_path in proxy.Get("(ss)", MC5_AM_IFACE_NAME, "ValidAccounts"):
# Create a new proxy connected to acct_obj_path
acct_proxy = Gio.DBusProxy.new_for_bus_sync(Gio.BusType.SESSION, 0, None,
MC5_BUS_NAME, acct_obj_path,
PROPERTIES_IFACE_NAME, None)
# status = (state, status, status_message)
status = acct_proxy.Get("(ss)", MC5_ACCT_IFACE_NAME, "RequestedPresence")
# Create the (uss) GVariant to set the new status message
vstatus = GLib.Variant("(uss)", (status[0], status[1], new_status))
# Set the status!
acct_proxy.Set("(ssv)", MC5_ACCT_IFACE_NAME, "RequestedPresence", vstatus)
except gi._glib.GError as e:
print ("GError while setting status: " + str(e))
def get_mc5_status (self):
try:
proxy = self.proxies["mc5_props"]
got_status = False
# a bit awful: this just returns the status text from the first account
# that has one.
for acct_obj_path in proxy.Get("(ss)", MC5_AM_IFACE_NAME, "ValidAccounts"):
# Create a new proxy connected to acct_obj_path
acct_proxy = Gio.DBusProxy.new_for_bus_sync (Gio.BusType.SESSION, 0, None,
MC5_BUS_NAME, acct_obj_path,
PROPERTIES_IFACE_NAME, None)
# Get (state, status, status_message)
ret = acct_proxy.Get("(ss)", MC5_ACCT_IFACE_NAME, "RequestedPresence")
got_status = True
if ret[2] != "":
return ret[2]
# if all accounts have empty status, return that
if got_status:
return ""
except gi._glib.GError as e:
print ("GError while setting status: " + str(e))
return None
def set_purple_status (self, new_status):
try:
proxy = self.proxies["purple"]
status = proxy.PurpleSavedstatusGetCurrent()
proxy.PurpleSavedstatusSetMessage("(is)", status, new_status)
proxy.PurpleSavedstatusActivate("(i)", status)
except gi._glib.GError as e:
print ("GError while setting status: " + str(e))
def get_purple_status (self):
try:
proxy = self.proxies["purple"]
status = proxy.PurpleSavedstatusGetCurrent()
return proxy.PurpleSavedstatusGetMessage("(i)", status)
except gi._glib.GError as e:
print ("GError while setting status: " + str(e))
return None
|
tangp3/gpdb | refs/heads/master | gpMgmt/bin/pythonSrc/subprocess32/testdata/qgrep.py | 241 |
"""When called with a single argument, simulated fgrep with a single
argument and no options."""
import sys
if __name__ == "__main__":
pattern = sys.argv[1]
for line in sys.stdin:
if pattern in line:
sys.stdout.write(line)
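# Illustrative usage (an assumed invocation, not part of the original file):
#   printf 'alpha\nbeta\nalphabet\n' | python qgrep.py alpha
# prints "alpha" and "alphabet", mirroring `fgrep alpha` with no options.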
|
skbly7/serc | refs/heads/master | website/website/wsgi.py | 16 |
"""
WSGI config for website project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "website.settings")
application = get_wsgi_application()
|
unicri/edx-platform | refs/heads/master | common/djangoapps/student/tests/test_email.py | 6 |
import json
import django.db
import unittest
from student.tests.factories import UserFactory, RegistrationFactory, PendingEmailChangeFactory
from student.views import (
reactivation_email_for_user, change_email_request, do_email_change_request, confirm_email_change,
SETTING_CHANGE_INITIATED
)
from student.models import UserProfile, PendingEmailChange
from django.core.urlresolvers import reverse
from django.core import mail
from django.contrib.auth.models import User, AnonymousUser
from django.test import TestCase, TransactionTestCase
from django.test.client import RequestFactory
from mock import Mock, patch
from django.http import Http404, HttpResponse
from django.conf import settings
from edxmako.shortcuts import render_to_string
from edxmako.tests import mako_middleware_process_request
from util.request import safe_get_host
from util.testing import EventTestMixin
class TestException(Exception):
"""Exception used for testing that nothing will catch explicitly"""
pass
def mock_render_to_string(template_name, context):
"""Return a string that encodes template_name and context"""
return str((template_name, sorted(context.iteritems())))
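# For example (illustrative only):
#   mock_render_to_string('emails/foo.txt', {'b': 2, 'a': 1})
# returns "('emails/foo.txt', [('a', 1), ('b', 2)])", so tests can assert on both
# the template name and the (sorted) context that were used.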
def mock_render_to_response(template_name, context):
"""Return an HttpResponse with content that encodes template_name and context"""
# View confirm_email_change uses @transaction.commit_manually.
# This simulates any db access in the templates.
UserProfile.objects.exists()
return HttpResponse(mock_render_to_string(template_name, context))
class EmailTestMixin(object):
"""Adds useful assertions for testing `email_user`"""
def assertEmailUser(self, email_user, subject_template, subject_context, body_template, body_context):
"""Assert that `email_user` was used to send and email with the supplied subject and body
`email_user`: The mock `django.contrib.auth.models.User.email_user` function
to verify
`subject_template`: The template to have been used for the subject
`subject_context`: The context to have been used for the subject
`body_template`: The template to have been used for the body
`body_context`: The context to have been used for the body
"""
email_user.assert_called_with(
mock_render_to_string(subject_template, subject_context),
mock_render_to_string(body_template, body_context),
settings.DEFAULT_FROM_EMAIL
)
def append_allowed_hosts(self, hostname):
""" Append hostname to settings.ALLOWED_HOSTS """
settings.ALLOWED_HOSTS.append(hostname)
self.addCleanup(settings.ALLOWED_HOSTS.pop)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class ActivationEmailTests(TestCase):
"""Test sending of the activation email. """
ACTIVATION_SUBJECT = "Activate Your edX Account"
# Text fragments we expect in the body of an email
# sent from an OpenEdX installation.
OPENEDX_FRAGMENTS = [
"Thank you for signing up for {platform}.".format(platform=settings.PLATFORM_NAME),
"http://edx.org/activate/",
(
"if you require assistance, check the help section of the "
"{platform} website".format(platform=settings.PLATFORM_NAME)
)
]
# Text fragments we expect in the body of an email
# sent from an EdX-controlled domain.
EDX_DOMAIN_FRAGMENTS = [
"Thank you for signing up for {platform}".format(platform=settings.PLATFORM_NAME),
"http://edx.org/activate/",
"https://www.edx.org/contact-us",
"This email was automatically sent by edx.org"
]
def setUp(self):
super(ActivationEmailTests, self).setUp()
def test_activation_email(self):
self._create_account()
self._assert_activation_email(self.ACTIVATION_SUBJECT, self.OPENEDX_FRAGMENTS)
@patch.dict(settings.FEATURES, {'IS_EDX_DOMAIN': True})
def test_activation_email_edx_domain(self):
self._create_account()
self._assert_activation_email(self.ACTIVATION_SUBJECT, self.EDX_DOMAIN_FRAGMENTS)
def _create_account(self):
"""Create an account, triggering the activation email. """
url = reverse('create_account')
params = {
'username': 'test_user',
'email': 'test_user@example.com',
'password': 'edx',
'name': 'Test User',
'honor_code': True,
'terms_of_service': True
}
resp = self.client.post(url, params)
self.assertEqual(
resp.status_code, 200,
msg=u"Could not create account (status {status}). The response was {response}".format(
status=resp.status_code,
response=resp.content
)
)
def _assert_activation_email(self, subject, body_fragments):
"""Verify that the activation email was sent. """
self.assertEqual(len(mail.outbox), 1)
msg = mail.outbox[0]
self.assertEqual(msg.subject, subject)
for fragment in body_fragments:
self.assertIn(fragment, msg.body)
@patch('student.views.render_to_string', Mock(side_effect=mock_render_to_string, autospec=True))
@patch('django.contrib.auth.models.User.email_user')
class ReactivationEmailTests(EmailTestMixin, TestCase):
"""Test sending a reactivation email to a user"""
def setUp(self):
self.user = UserFactory.create()
self.unregisteredUser = UserFactory.create()
self.registration = RegistrationFactory.create(user=self.user)
def reactivation_email(self, user):
"""
Send the reactivation email to the specified user,
and return the response as json data.
"""
return json.loads(reactivation_email_for_user(user).content)
def assertReactivateEmailSent(self, email_user):
"""Assert that the correct reactivation email has been sent"""
context = {
'name': self.user.profile.name,
'key': self.registration.activation_key
}
self.assertEmailUser(
email_user,
'emails/activation_email_subject.txt',
context,
'emails/activation_email.txt',
context
)
# Thorough tests for safe_get_host are elsewhere; here we just want a quick URL sanity check
request = RequestFactory().post('unused_url')
request.user = self.user
request.META['HTTP_HOST'] = "aGenericValidHostName"
self.append_allowed_hosts("aGenericValidHostName")
mako_middleware_process_request(request)
body = render_to_string('emails/activation_email.txt', context)
host = safe_get_host(request)
self.assertIn(host, body)
def test_reactivation_email_failure(self, email_user):
self.user.email_user.side_effect = Exception
response_data = self.reactivation_email(self.user)
self.assertReactivateEmailSent(email_user)
self.assertFalse(response_data['success'])
def test_reactivation_for_unregistered_user(self, email_user):
"""
Test that trying to send a reactivation email to an unregistered
user fails without throwing a 500 error.
"""
response_data = self.reactivation_email(self.unregisteredUser)
self.assertFalse(response_data['success'])
def test_reactivation_email_success(self, email_user):
response_data = self.reactivation_email(self.user)
self.assertReactivateEmailSent(email_user)
self.assertTrue(response_data['success'])
class EmailChangeRequestTests(EventTestMixin, TestCase):
"""Test changing a user's email address"""
def setUp(self):
super(EmailChangeRequestTests, self).setUp('student.views.tracker')
self.user = UserFactory.create()
self.new_email = 'new.email@edx.org'
self.req_factory = RequestFactory()
self.request = self.req_factory.post('unused_url', data={
'password': 'test',
'new_email': self.new_email
})
self.request.user = self.user
self.user.email_user = Mock()
def run_request(self, request=None):
"""Execute request and return result parsed as json
If request isn't passed in, use self.request instead
"""
if request is None:
request = self.request
response = change_email_request(request)
return json.loads(response.content)
def assertFailedRequest(self, response_data, expected_error):
"""Assert that `response_data` indicates a failed request that returns `expected_error`"""
self.assertFalse(response_data['success'])
self.assertEquals(expected_error, response_data['error'])
self.assertFalse(self.user.email_user.called)
def test_unauthenticated(self):
self.request.user = AnonymousUser()
self.request.user.email_user = Mock()
with self.assertRaises(Http404):
change_email_request(self.request)
self.assertFalse(self.request.user.email_user.called)
def test_invalid_password(self):
self.request.POST['password'] = 'wrong'
self.assertFailedRequest(self.run_request(), 'Invalid password')
@patch('student.views.render_to_string', Mock(side_effect=mock_render_to_string, autospec=True))
def test_duplicate_activation_key(self):
"""Assert that if two users change Email address simultaneously, server should return 200"""
# New emails for the users
user1_new_email = "valid_user1_email@example.com"
user2_new_email = "valid_user2_email@example.com"
# Set new email for user1.
self.request.POST['new_email'] = user1_new_email
# Create another user, 'user2', and make an email change request for them
user2 = UserFactory.create(email=self.new_email, password="test2")
user2_request = self.req_factory.post('unused_url', data={
'password': 'test2',
'new_email': user2_new_email
})
user2_request.user = user2
# Send requests & check if response was successful
user1_response = change_email_request(self.request)
user2_response = change_email_request(user2_request)
self.assertEqual(user1_response.status_code, 200)
self.assertEqual(user2_response.status_code, 200)
def test_invalid_emails(self):
for email in ('bad_email', 'bad_email@', '@bad_email'):
self.request.POST['new_email'] = email
self.assertFailedRequest(self.run_request(), 'Valid e-mail address required.')
def test_change_email_to_existing_value(self):
""" Test the error message if user attempts to change email to the existing value. """
self.request.POST['new_email'] = self.user.email
self.assertFailedRequest(self.run_request(), 'Old email is the same as the new email.')
def check_duplicate_email(self, email):
"""Test that a request to change a users email to `email` fails"""
request = self.req_factory.post('unused_url', data={
'new_email': email,
'password': 'test',
})
request.user = self.user
self.assertFailedRequest(self.run_request(request), 'An account with this e-mail already exists.')
def test_duplicate_email(self):
UserFactory.create(email=self.new_email)
self.check_duplicate_email(self.new_email)
def test_capitalized_duplicate_email(self):
"""Test that we check for email addresses in a case insensitive way"""
UserFactory.create(email=self.new_email)
self.check_duplicate_email(self.new_email.capitalize())
@patch('django.core.mail.send_mail')
@patch('student.views.render_to_string', Mock(side_effect=mock_render_to_string, autospec=True))
def test_email_failure(self, send_mail):
""" Test the return value if sending the email for the user to click fails. """
send_mail.side_effect = [Exception, None]
self.request.POST['new_email'] = "valid@email.com"
self.assertFailedRequest(self.run_request(), 'Unable to send email activation link. Please try again later.')
self.assert_no_events_were_emitted()
@patch('django.core.mail.send_mail')
@patch('student.views.render_to_string', Mock(side_effect=mock_render_to_string, autospec=True))
def test_email_success(self, send_mail):
""" Test email was sent if no errors encountered. """
old_email = self.user.email
new_email = "valid@example.com"
registration_key = "test registration key"
do_email_change_request(self.user, new_email, registration_key)
context = {
'key': registration_key,
'old_email': old_email,
'new_email': new_email
}
send_mail.assert_called_with(
mock_render_to_string('emails/email_change_subject.txt', context),
mock_render_to_string('emails/email_change.txt', context),
settings.DEFAULT_FROM_EMAIL,
[new_email]
)
self.assert_event_emitted(
SETTING_CHANGE_INITIATED, user_id=self.user.id, setting=u'email', old=old_email, new=new_email
)
@patch('django.contrib.auth.models.User.email_user')
@patch('student.views.render_to_response', Mock(side_effect=mock_render_to_response, autospec=True))
@patch('student.views.render_to_string', Mock(side_effect=mock_render_to_string, autospec=True))
class EmailChangeConfirmationTests(EmailTestMixin, TransactionTestCase):
"""Test that confirmation of email change requests function even in the face of exceptions thrown while sending email"""
def setUp(self):
self.user = UserFactory.create()
self.profile = UserProfile.objects.get(user=self.user)
self.req_factory = RequestFactory()
self.request = self.req_factory.get('unused_url')
self.request.user = self.user
self.user.email_user = Mock()
self.pending_change_request = PendingEmailChangeFactory.create(user=self.user)
self.key = self.pending_change_request.activation_key
def assertRolledBack(self):
"""Assert that no changes to user, profile, or pending email have been made to the db"""
self.assertEquals(self.user.email, User.objects.get(username=self.user.username).email)
self.assertEquals(self.profile.meta, UserProfile.objects.get(user=self.user).meta)
self.assertEquals(1, PendingEmailChange.objects.count())
def assertFailedBeforeEmailing(self, email_user):
"""Assert that the function failed before emailing a user"""
self.assertRolledBack()
self.assertFalse(email_user.called)
def check_confirm_email_change(self, expected_template, expected_context):
"""Call `confirm_email_change` and assert that the content was generated as expected
`expected_template`: The name of the template that should have been used
to generate the content
`expected_context`: The context dictionary that should have been used to
generate the content
"""
response = confirm_email_change(self.request, self.key)
self.assertEquals(
mock_render_to_response(expected_template, expected_context).content,
response.content
)
def assertChangeEmailSent(self, email_user):
"""Assert that the correct email was sent to confirm an email change"""
context = {
'old_email': self.user.email,
'new_email': self.pending_change_request.new_email,
}
self.assertEmailUser(
email_user,
'emails/email_change_subject.txt',
context,
'emails/confirm_email_change.txt',
context
)
# Thorough tests for safe_get_host are elsewhere; here we just want a quick URL sanity check
request = RequestFactory().post('unused_url')
request.user = self.user
request.META['HTTP_HOST'] = "aGenericValidHostName"
self.append_allowed_hosts("aGenericValidHostName")
mako_middleware_process_request(request)
body = render_to_string('emails/confirm_email_change.txt', context)
url = safe_get_host(request)
self.assertIn(url, body)
def test_not_pending(self, email_user):
self.key = 'not_a_key'
self.check_confirm_email_change('invalid_email_key.html', {})
self.assertFailedBeforeEmailing(email_user)
def test_duplicate_email(self, email_user):
UserFactory.create(email=self.pending_change_request.new_email)
self.check_confirm_email_change('email_exists.html', {})
self.assertFailedBeforeEmailing(email_user)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', "Test only valid in LMS")
def test_old_email_fails(self, email_user):
email_user.side_effect = [Exception, None]
self.check_confirm_email_change('email_change_failed.html', {
'email': self.user.email,
})
self.assertRolledBack()
self.assertChangeEmailSent(email_user)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', "Test only valid in LMS")
def test_new_email_fails(self, email_user):
email_user.side_effect = [None, Exception]
self.check_confirm_email_change('email_change_failed.html', {
'email': self.pending_change_request.new_email
})
self.assertRolledBack()
self.assertChangeEmailSent(email_user)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', "Test only valid in LMS")
def test_successful_email_change(self, email_user):
self.check_confirm_email_change('email_change_successful.html', {
'old_email': self.user.email,
'new_email': self.pending_change_request.new_email
})
self.assertChangeEmailSent(email_user)
meta = json.loads(UserProfile.objects.get(user=self.user).meta)
self.assertIn('old_emails', meta)
self.assertEquals(self.user.email, meta['old_emails'][0][0])
self.assertEquals(
self.pending_change_request.new_email,
User.objects.get(username=self.user.username).email
)
self.assertEquals(0, PendingEmailChange.objects.count())
@patch('student.views.PendingEmailChange.objects.get', Mock(side_effect=TestException))
@patch('student.views.transaction.rollback', wraps=django.db.transaction.rollback)
def test_always_rollback(self, rollback, _email_user):
with self.assertRaises(TestException):
confirm_email_change(self.request, self.key)
rollback.assert_called_with()
|
pedrotari7/advent_of_code | refs/heads/master | py/2015/22B.py | 1 |
import Queue, copy
state = dict()
state['player'] = {'hit':50,'mana':500,'armor':0,'spent':0}
state['boss'] = {'hit': 58,'damage':9}
state['spells'] = {}
state['player_turn'] = True
state['seq'] = []
effects = dict()
effects['missile'] = {'cost':53,'damage':4,'hit':0,'mana':0,'duration':0,'armor':0}
effects['drain'] = {'cost':73,'damage':2,'hit':2,'mana':0,'duration':0,'armor':0}
effects['shield'] = {'cost':113,'damage':0,'hit':0,'mana':0,'duration':6,'armor':7}
effects['poison'] = {'cost':173,'damage':3,'hit':0,'mana':0,'duration':6,'armor':0}
effects['recharge'] = {'cost':229,'damage':0,'hit':0,'mana':101,'duration':5,'armor':0}
best = state['player']['mana']**10
q = Queue.Queue()
q.put(state)
while not q.empty():
s = q.get()
if s['player']['hit'] <= 0 or s['player']['spent'] > best:
continue
if s['boss']['hit'] <= 0:
if best > s['player']['spent']:
best = s['player']['spent']
print best
continue
for active in s['spells']:
s['boss']['hit'] -= effects[active]['damage']
s['player']['mana'] += effects[active]['mana']
s['spells'][active] -= 1
for to_delete in [d for d in s['spells'] if s['spells'][d] == 0]:
del s['spells'][to_delete]
if to_delete == 'shield':
s['player']['armor'] = 0
if s['player_turn']:
s['player']['hit'] -= 1
s['player_turn'] = not s['player_turn']
for spell in effects:
if spell not in s['spells'] and effects[spell]['cost'] <= s['player']['mana']:
cs = copy.deepcopy(s)
if effects[spell]['duration'] > 0:
cs['spells'][spell] = effects[spell]['duration']
if spell == 'shield':
cs['player']['armor'] = effects[spell]['armor']
else:
cs['boss']['hit'] -= effects[spell]['damage']
cs['player']['hit'] += effects[spell]['hit']
cs['player']['spent'] += effects[spell]['cost']
cs['player']['mana'] -= effects[spell]['cost']
cs['seq'].append((spell,cs['player']['spent'],cs['player']['hit'],cs['boss']['hit'],cs['player']['armor']))
q.put(cs)
else:
s['player_turn'] = not s['player_turn']
s['player']['hit'] -= max(1,s['boss']['damage']-s['player']['armor'])
q.put(s)
print best
|
alliejones/zulip | refs/heads/master | puppet/zulip_internal/files/postgresql/pg_backup_and_purge.py | 8 |
#!/usr/bin/env python2.7
from __future__ import print_function
import subprocess
import sys
import logging
import dateutil.parser
import pytz
from datetime import datetime, timedelta
logging.basicConfig(format="%(asctime)s %(levelname)s: %(message)s")
logger = logging.getLogger(__name__)
def run(args, dry_run=False):
if dry_run:
print("Would have run: " + " ".join(args))
return ""
p = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode:
logger.error("Could not invoke %s\nstdout: %s\nstderror: %s"
% (args[0], stdout, stderr))
sys.exit(1)
return stdout
# Only run if we're the master
if run(['psql', '-t', '-c', 'select pg_is_in_recovery()']).strip() != 'f':
sys.exit(0)
run(['env-wal-e', 'backup-push', '/var/lib/postgresql/9.1/main'])
now = datetime.now(tz=pytz.utc)
with open('/var/lib/nagios_state/last_postgres_backup', 'w') as f:
f.write(now.isoformat())
f.write("\n")
backups = {}
lines = run(['env-wal-e', 'backup-list']).split("\n")
for line in lines[1:]:
if line:
backup_name, date, _, _ = line.split()
backups[dateutil.parser.parse(date)] = backup_name
one_month_ago = now - timedelta(days=30)
for date in sorted(backups.keys(), reverse=True):
if date < one_month_ago:
run(['env-wal-e', 'delete', '--confirm', 'before', backups[date]])
# Because we're going from most recent to least recent, we
# only have to do one delete operation
break
|
JimCircadian/ansible | refs/heads/devel | test/sanity/code-smell/no-underscore-variable.py | 30 |
#!/usr/bin/env python
# Only needed until we can enable a pylint test for this. We may have to write
# one or add it to another existing test (like the one to warn on inappropriate
# variable names). Adding to an existing test may be hard as we may have many
# other things that are not compliant with that test.
import os
import re
import sys
def main():
skip = set([
'test/sanity/code-smell/%s' % os.path.basename(__file__),
# These files currently use _ as a variable. Fix them and then remove them
# from this list. Note that we're not sure if we'll translate module return
# values. If we decide never to do that, then we can stop checking for those.
'contrib/inventory/gce.py',
'lib/ansible/cli/console.py',
'lib/ansible/compat/selectors/_selectors2.py',
'lib/ansible/executor/playbook_executor.py',
'lib/ansible/executor/task_queue_manager.py',
'lib/ansible/module_utils/facts/network/linux.py',
'lib/ansible/module_utils/urls.py',
'lib/ansible/modules/cloud/amazon/data_pipeline.py',
'lib/ansible/modules/cloud/amazon/ec2_group_facts.py',
'lib/ansible/modules/cloud/amazon/ec2_vpc_nat_gateway.py',
'lib/ansible/modules/cloud/amazon/ec2_vpc_vpn.py',
'lib/ansible/modules/cloud/amazon/efs.py',
'lib/ansible/modules/cloud/amazon/efs_facts.py',
'lib/ansible/modules/cloud/amazon/kinesis_stream.py',
'lib/ansible/modules/cloud/amazon/route53_zone.py',
'lib/ansible/modules/cloud/amazon/s3_sync.py',
'lib/ansible/modules/cloud/azure/azure_rm_loadbalancer.py',
'lib/ansible/modules/cloud/docker/docker_container.py',
'lib/ansible/modules/cloud/docker/docker_service.py',
'lib/ansible/modules/cloud/google/gce.py',
'lib/ansible/modules/cloud/google/gce_eip.py',
'lib/ansible/modules/cloud/google/gce_img.py',
'lib/ansible/modules/cloud/google/gce_instance_template.py',
'lib/ansible/modules/cloud/google/gce_lb.py',
'lib/ansible/modules/cloud/google/gce_mig.py',
'lib/ansible/modules/cloud/google/gce_net.py',
'lib/ansible/modules/cloud/google/gce_pd.py',
'lib/ansible/modules/cloud/google/gce_snapshot.py',
'lib/ansible/modules/cloud/google/gce_tag.py',
'lib/ansible/modules/cloud/google/gcp_backend_service.py',
'lib/ansible/modules/cloud/google/gcp_healthcheck.py',
'lib/ansible/modules/cloud/lxc/lxc_container.py',
'lib/ansible/modules/files/copy.py',
'lib/ansible/modules/files/patch.py',
'lib/ansible/modules/files/synchronize.py',
'lib/ansible/modules/monitoring/statusio_maintenance.py',
'lib/ansible/modules/monitoring/zabbix/zabbix_maintenance.py',
'lib/ansible/modules/net_tools/basics/uri.py',
'lib/ansible/modules/network/cloudengine/ce_acl.py',
'lib/ansible/modules/network/cloudengine/ce_command.py',
'lib/ansible/modules/network/cloudengine/ce_dldp_interface.py',
'lib/ansible/modules/network/cloudengine/ce_mlag_interface.py',
'lib/ansible/modules/network/cloudvision/cv_server_provision.py',
'lib/ansible/modules/network/f5/bigip_remote_syslog.py',
'lib/ansible/modules/network/illumos/dladm_etherstub.py',
'lib/ansible/modules/network/illumos/dladm_iptun.py',
'lib/ansible/modules/network/illumos/dladm_linkprop.py',
'lib/ansible/modules/network/illumos/dladm_vlan.py',
'lib/ansible/modules/network/illumos/dladm_vnic.py',
'lib/ansible/modules/network/illumos/flowadm.py',
'lib/ansible/modules/network/illumos/ipadm_addr.py',
'lib/ansible/modules/network/illumos/ipadm_addrprop.py',
'lib/ansible/modules/network/illumos/ipadm_if.py',
'lib/ansible/modules/network/illumos/ipadm_ifprop.py',
'lib/ansible/modules/network/illumos/ipadm_prop.py',
'lib/ansible/modules/network/vyos/vyos_command.py',
'lib/ansible/modules/packaging/language/pip.py',
'lib/ansible/modules/packaging/os/yum.py',
'lib/ansible/modules/source_control/git.py',
'lib/ansible/modules/system/alternatives.py',
'lib/ansible/modules/system/beadm.py',
'lib/ansible/modules/system/cronvar.py',
'lib/ansible/modules/system/dconf.py',
'lib/ansible/modules/system/filesystem.py',
'lib/ansible/modules/system/gconftool2.py',
'lib/ansible/modules/system/interfaces_file.py',
'lib/ansible/modules/system/iptables.py',
'lib/ansible/modules/system/java_cert.py',
'lib/ansible/modules/system/lvg.py',
'lib/ansible/modules/system/lvol.py',
'lib/ansible/modules/system/parted.py',
'lib/ansible/modules/system/timezone.py',
'lib/ansible/modules/system/ufw.py',
'lib/ansible/modules/utilities/logic/wait_for.py',
'lib/ansible/modules/web_infrastructure/rundeck_acl_policy.py',
'lib/ansible/parsing/vault/__init__.py',
'lib/ansible/playbook/base.py',
'lib/ansible/playbook/helpers.py',
'lib/ansible/playbook/role/__init__.py',
'lib/ansible/playbook/taggable.py',
'lib/ansible/plugins/callback/hipchat.py',
'lib/ansible/plugins/connection/lxc.py',
'lib/ansible/plugins/filter/core.py',
'lib/ansible/plugins/lookup/sequence.py',
'lib/ansible/plugins/strategy/__init__.py',
'lib/ansible/plugins/strategy/linear.py',
'test/legacy/cleanup_gce.py',
'test/legacy/gce_credentials.py',
'test/runner/lib/cloud/cs.py',
'test/runner/lib/core_ci.py',
'test/runner/lib/delegation.py',
'test/runner/lib/docker_util.py',
'test/runner/lib/executor.py',
'test/runner/lib/http.py',
'test/runner/lib/import_analysis.py',
'test/runner/lib/manage_ci.py',
'test/runner/lib/target.py',
'test/runner/lib/util.py',
'test/sanity/import/importer.py',
'test/sanity/validate-modules/main.py',
'test/units/executor/test_play_iterator.py',
'test/units/module_utils/basic/test_run_command.py',
'test/units/modules/cloud/amazon/test_ec2_vpc_nat_gateway.py',
'test/units/modules/cloud/amazon/test_ec2_vpc_vpn.py',
'test/units/modules/system/interfaces_file/test_interfaces_file.py',
])
for path in sys.argv[1:] or sys.stdin.read().splitlines():
if path in skip:
continue
with open(path, 'r') as path_fd:
for line, text in enumerate(path_fd.readlines()):
match = re.search(r'(?: |[^C]\()(_)(?: |,|\))', text)
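# Illustrative examples (an interpretation of the pattern above, not an exhaustive spec):
#   "for _, item in enumerate(items):"  -> flagged ("_" sits between a space and a comma)
#   "msg = _('hello world')"            -> not flagged ("_" is followed by "(", as in a gettext call)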
if match:
print('%s:%d:%d: use `dummy` instead of `_` for a variable name' % (
path, line + 1, match.start(1) + 1))
if __name__ == '__main__':
main()
|
kalaalto/fMBT | refs/heads/master | utils/fmbtwindows_agent.py | 1 |
# fMBT, free Model Based Testing tool
# Copyright (c) 2014, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU Lesser General Public License,
# version 2.1, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
# more details.
#
# You should have received a copy of the GNU Lesser General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
import ctypes
import ctypes.wintypes
import glob
import os
import string
import struct
import subprocess
import sys
import thread
import time
import zlib
try:
import wmi # try to import wmi, requires that pywin32 and wmi packages
# are installed in DUT
except:
pass
_mouse_input_area = (1920, 1080)
_HTTPServerProcess = None
INPUT_MOUSE = 0
INPUT_KEYBOARD = 1
INPUT_HARDWARE = 2
# For touchMask
TOUCH_MASK_NONE = 0x00000000
TOUCH_MASK_CONTACTAREA = 0x00000001
TOUCH_MASK_ORIENTATION = 0x00000002
TOUCH_MASK_PRESSURE = 0x00000004
TOUCH_MASK_ALL = 0x00000007
# For touchFlag
TOUCH_FLAG_NONE = 0x00000000
# For pointerType
PT_POINTER = 0x00000001
PT_TOUCH = 0x00000002
PT_PEN = 0x00000003
PT_MOUSE = 0x00000004
# For pointerFlags
POINTER_FLAG_NONE = 0x00000000
POINTER_FLAG_NEW = 0x00000001
POINTER_FLAG_INRANGE = 0x00000002
POINTER_FLAG_INCONTACT = 0x00000004
POINTER_FLAG_FIRSTBUTTON = 0x00000010
POINTER_FLAG_SECONDBUTTON = 0x00000020
POINTER_FLAG_THIRDBUTTON = 0x00000040
POINTER_FLAG_FOURTHBUTTON = 0x00000080
POINTER_FLAG_FIFTHBUTTON = 0x00000100
POINTER_FLAG_PRIMARY = 0x00002000
POINTER_FLAG_CONFIDENCE = 0x00004000
POINTER_FLAG_CANCELED = 0x00008000
POINTER_FLAG_DOWN = 0x00010000
POINTER_FLAG_UPDATE = 0x00020000
POINTER_FLAG_UP = 0x00040000
POINTER_FLAG_WHEEL = 0x00080000
POINTER_FLAG_HWHEEL = 0x00100000
POINTER_FLAG_CAPTURECHANGED = 0x00200000
WHEEL_DELTA = 120
XBUTTON1 = 0x0001
XBUTTON2 = 0x0002
MOUSEEVENTF_ABSOLUTE = 0x8000
MOUSEEVENTF_HWHEEL = 0x01000
MOUSEEVENTF_MOVE = 0x0001
MOUSEEVENTF_MOVE_NOCOALESCE = 0x2000
MOUSEEVENTF_LEFTDOWN = 0x0002
MOUSEEVENTF_LEFTUP = 0x0004
MOUSEEVENTF_RIGHTDOWN = 0x0008
MOUSEEVENTF_RIGHTUP = 0x0010
MOUSEEVENTF_MIDDLEDOWN = 0x0020
MOUSEEVENTF_MIDDLEUP = 0x0040
MOUSEEVENTF_VIRTUALDESK = 0x4000
MOUSEEVENTF_WHEEL = 0x0800
MOUSEEVENTF_XDOWN = 0x0080
MOUSEEVENTF_XUP = 0x0100
SM_XVIRTUALSCREEN = 76
SM_YVIRTUALSCREEN = 77
SM_CXVIRTUALSCREEN = 78
SM_CYVIRTUALSCREEN = 79
VK_LBUTTON = 0x01 # Left mouse button
VK_RBUTTON = 0x02 # Right mouse button
VK_CANCEL = 0x03 # Control-break processing
VK_MBUTTON = 0x04 # Middle mouse button (three-button mouse)
VK_XBUTTON1 = 0x05 # X1 mouse button
VK_XBUTTON2 = 0x06 # X2 mouse button
VK_BACK = 0x08 # BACKSPACE key
VK_TAB = 0x09 # TAB key
VK_CLEAR = 0x0C # CLEAR key
VK_RETURN = 0x0D # ENTER key
VK_SHIFT = 0x10 # SHIFT key
VK_CONTROL = 0x11 # CTRL key
VK_MENU = 0x12 # ALT key
VK_PAUSE = 0x13 # PAUSE key
VK_CAPITAL = 0x14 # CAPS LOCK key
VK_KANA = 0x15 # IME Kana mode
VK_HANGUL = 0x15 # IME Hangul mode
VK_JUNJA = 0x17 # IME Junja mode
VK_FINAL = 0x18 # IME final mode
VK_HANJA = 0x19 # IME Hanja mode
VK_KANJI = 0x19 # IME Kanji mode
VK_ESCAPE = 0x1B # ESC key
VK_CONVERT = 0x1C # IME convert
VK_NONCONVERT = 0x1D # IME nonconvert
VK_ACCEPT = 0x1E # IME accept
VK_MODECHANGE = 0x1F # IME mode change request
VK_SPACE = 0x20 # SPACEBAR
VK_PRIOR = 0x21 # PAGE UP key
VK_NEXT = 0x22 # PAGE DOWN key
VK_END = 0x23 # END key
VK_HOME = 0x24 # HOME key
VK_LEFT = 0x25 # LEFT ARROW key
VK_UP = 0x26 # UP ARROW key
VK_RIGHT = 0x27 # RIGHT ARROW key
VK_DOWN = 0x28 # DOWN ARROW key
VK_SELECT = 0x29 # SELECT key
VK_PRINT = 0x2A # PRINT key
VK_EXECUTE = 0x2B # EXECUTE key
VK_SNAPSHOT = 0x2C # PRINT SCREEN key
VK_INSERT = 0x2D # INS key
VK_DELETE = 0x2E # DEL key
VK_HELP = 0x2F # HELP key
VK_LWIN = 0x5B # Left Windows key (Natural keyboard)
VK_RWIN = 0x5C # Right Windows key (Natural keyboard)
VK_APPS = 0x5D # Applications key (Natural keyboard)
VK_SLEEP = 0x5F # Computer Sleep key
VK_NUMPAD0 = 0x60 # Numeric keypad 0 key
VK_NUMPAD1 = 0x61 # Numeric keypad 1 key
VK_NUMPAD2 = 0x62 # Numeric keypad 2 key
VK_NUMPAD3 = 0x63 # Numeric keypad 3 key
VK_NUMPAD4 = 0x64 # Numeric keypad 4 key
VK_NUMPAD5 = 0x65 # Numeric keypad 5 key
VK_NUMPAD6 = 0x66 # Numeric keypad 6 key
VK_NUMPAD7 = 0x67 # Numeric keypad 7 key
VK_NUMPAD8 = 0x68 # Numeric keypad 8 key
VK_NUMPAD9 = 0x69 # Numeric keypad 9 key
VK_MULTIPLY = 0x6A # Multiply key
VK_ADD = 0x6B # Add key
VK_SEPARATOR = 0x6C # Separator key
VK_SUBTRACT = 0x6D # Subtract key
VK_DECIMAL = 0x6E # Decimal key
VK_DIVIDE = 0x6F # Divide key
VK_F1 = 0x70 # F1 key
VK_F2 = 0x71 # F2 key
VK_F3 = 0x72 # F3 key
VK_F4 = 0x73 # F4 key
VK_F5 = 0x74 # F5 key
VK_F6 = 0x75 # F6 key
VK_F7 = 0x76 # F7 key
VK_F8 = 0x77 # F8 key
VK_F9 = 0x78 # F9 key
VK_F10 = 0x79 # F10 key
VK_F11 = 0x7A # F11 key
VK_F12 = 0x7B # F12 key
VK_F13 = 0x7C # F13 key
VK_F14 = 0x7D # F14 key
VK_F15 = 0x7E # F15 key
VK_F16 = 0x7F # F16 key
VK_F17 = 0x80 # F17 key
VK_F18 = 0x81 # F18 key
VK_F19 = 0x82 # F19 key
VK_F20 = 0x83 # F20 key
VK_F21 = 0x84 # F21 key
VK_F22 = 0x85 # F22 key
VK_F23 = 0x86 # F23 key
VK_F24 = 0x87 # F24 key
VK_NUMLOCK = 0x90 # NUM LOCK key
VK_SCROLL = 0x91 # SCROLL LOCK key
VK_LSHIFT = 0xA0 # Left SHIFT key
VK_RSHIFT = 0xA1 # Right SHIFT key
VK_LCONTROL = 0xA2 # Left CONTROL key
VK_RCONTROL = 0xA3 # Right CONTROL key
VK_LMENU = 0xA4 # Left MENU key
VK_RMENU = 0xA5 # Right MENU key
VK_BROWSER_BACK = 0xA6 # Browser Back key
VK_BROWSER_FORWARD = 0xA7 # Browser Forward key
VK_BROWSER_REFRESH = 0xA8 # Browser Refresh key
VK_BROWSER_STOP = 0xA9 # Browser Stop key
VK_BROWSER_SEARCH = 0xAA # Browser Search key
VK_BROWSER_FAVORITES = 0xAB # Browser Favorites key
VK_BROWSER_HOME = 0xAC # Browser Start and Home key
VK_VOLUME_MUTE = 0xAD # Volume Mute key
VK_VOLUME_DOWN = 0xAE # Volume Down key
VK_VOLUME_UP = 0xAF # Volume Up key
VK_MEDIA_NEXT_TRACK = 0xB0 # Next Track key
VK_MEDIA_PREV_TRACK = 0xB1 # Previous Track key
VK_MEDIA_STOP = 0xB2 # Stop Media key
VK_MEDIA_PLAY_PAUSE = 0xB3 # Play/Pause Media key
VK_LAUNCH_MAIL = 0xB4 # Start Mail key
VK_LAUNCH_MEDIA_SELECT = 0xB5 # Select Media key
VK_LAUNCH_APP1 = 0xB6 # Start Application 1 key
VK_LAUNCH_APP2 = 0xB7 # Start Application 2 key
VK_OEM_1 = 0xBA # Used for miscellaneous characters; it can vary by keyboard.
# For the US standard keyboard, the ';:' key
VK_OEM_PLUS = 0xBB # For any country/region, the '+' key
VK_OEM_COMMA = 0xBC # For any country/region, the ',' key
VK_OEM_MINUS = 0xBD # For any country/region, the '-' key
VK_OEM_PERIOD = 0xBE # For any country/region, the '.' key
VK_OEM_2 = 0xBF # Used for miscellaneous characters; it can vary by keyboard.
# For the US standard keyboard, the '/?' key
VK_OEM_3 = 0xC0 # Used for miscellaneous characters; it can vary by keyboard.
# For the US standard keyboard, the '`~' key
VK_OEM_4 = 0xDB # Used for miscellaneous characters; it can vary by keyboard.
# For the US standard keyboard, the '[{' key
VK_OEM_5 = 0xDC # Used for miscellaneous characters; it can vary by keyboard.
# For the US standard keyboard, the '\|' key
VK_OEM_6 = 0xDD # Used for miscellaneous characters; it can vary by keyboard.
# For the US standard keyboard, the ']}' key
VK_OEM_7 = 0xDE # Used for miscellaneous characters; it can vary by keyboard.
# For the US standard keyboard, the 'single-quote/double-quote' key
VK_OEM_8 = 0xDF # Used for miscellaneous characters; it can vary by keyboard.
VK_OEM_102 = 0xE2 # Either the angle bracket key or the backslash key on the RT 102-key keyboard
VK_PROCESSKEY = 0xE5 # IME PROCESS key
VK_PACKET = 0xE7 # Used to pass Unicode characters as if they were keystrokes. The VK_PACKET key is the low word of a 32-bit Virtual Key value used for non-keyboard input methods. For more information, see Remark in KEYBDINPUT, SendInput, WM_KEYDOWN, and WM_KEYUP
VK_ATTN = 0xF6 # Attn key
VK_CRSEL = 0xF7 # CrSel key
VK_EXSEL = 0xF8 # ExSel key
VK_EREOF = 0xF9 # Erase EOF key
VK_PLAY = 0xFA # Play key
VK_ZOOM = 0xFB # Zoom key
VK_PA1 = 0xFD # PA1 key
VK_OEM_CLEAR = 0xFE # Clear key
KEYEVENTF_EXTENDEDKEY = 0x0001
KEYEVENTF_KEYUP = 0x0002
KEYEVENTF_SCANCODE = 0x0008
KEYEVENTF_UNICODE = 0x0004
KEY_0 = 0x30
KEY_1 = 0x31
KEY_2 = 0x32
KEY_3 = 0x33
KEY_4 = 0x34
KEY_5 = 0x35
KEY_6 = 0x36
KEY_7 = 0x37
KEY_8 = 0x38
KEY_9 = 0x39
KEY_A = 0x41
KEY_B = 0x42
KEY_C = 0x43
KEY_D = 0x44
KEY_E = 0x45
KEY_F = 0x46
KEY_G = 0x47
KEY_H = 0x48
KEY_I = 0x49
KEY_J = 0x4A
KEY_K = 0x4B
KEY_L = 0x4C
KEY_M = 0x4D
KEY_N = 0x4E
KEY_O = 0x4F
KEY_P = 0x50
KEY_Q = 0x51
KEY_R = 0x52
KEY_S = 0x53
KEY_T = 0x54
KEY_U = 0x55
KEY_V = 0x56
KEY_W = 0x57
KEY_X = 0x58
KEY_Y = 0x59
KEY_Z = 0x5A
LONG = ctypes.c_long
DWORD = ctypes.c_ulong
ULONG_PTR = ctypes.POINTER(DWORD)
WORD = ctypes.c_ushort
# Structs for mouse and keyboard input
class MOUSEINPUT(ctypes.Structure):
_fields_ = (('dx', LONG),
('dy', LONG),
('mouseData', DWORD),
('dwFlags', DWORD),
('time', DWORD),
('dwExtraInfo', ULONG_PTR))
class KEYBDINPUT(ctypes.Structure):
_fields_ = (('wVk', WORD),
('wScan', WORD),
('dwFlags', DWORD),
('time', DWORD),
('dwExtraInfo', ULONG_PTR))
class HARDWAREINPUT(ctypes.Structure):
_fields_ = (('uMsg', DWORD),
('wParamL', WORD),
('wParamH', WORD))
class _INPUTunion(ctypes.Union):
_fields_ = (('mi', MOUSEINPUT),
('ki', KEYBDINPUT),
('hi', HARDWAREINPUT))
class INPUT(ctypes.Structure):
_fields_ = (('type', DWORD),
('union', _INPUTunion))
# Structs for touch input
class POINTER_INFO(ctypes.Structure):
_fields_ = [("pointerType", ctypes.c_uint32),
("pointerId", ctypes.c_uint32),
("frameId", ctypes.c_uint32),
("pointerFlags", ctypes.c_int),
("sourceDevice", ctypes.wintypes.HANDLE),
("hwndTarget", ctypes.wintypes.HWND),
("ptPixelLocation", ctypes.wintypes.POINT),
("ptHimetricLocation", ctypes.wintypes.POINT),
("ptPixelLocationRaw", ctypes.wintypes.POINT),
("ptHimetricLocationRaw", ctypes.wintypes.POINT),
("dwTime", DWORD),
("historyCount", ctypes.c_uint32),
("inputData", ctypes.c_int32),
("dwKeyStates", DWORD),
("PerformanceCount", ctypes.c_uint64),
("ButtonChangeType", ctypes.c_int)
]
class POINTER_TOUCH_INFO(ctypes.Structure):
_fields_ = [("pointerInfo", POINTER_INFO),
("touchFlags", ctypes.c_int),
("touchMask", ctypes.c_int),
("rcContact", ctypes.wintypes.RECT),
("rcContactRaw", ctypes.wintypes.RECT),
("orientation", ctypes.c_uint32),
("pressure", ctypes.c_uint32)]
# Initialize Pointer and Touch info
pointerInfo = POINTER_INFO(pointerType=PT_TOUCH,
pointerId=0,
ptPixelLocation=ctypes.wintypes.POINT(90,54))
touchInfo = POINTER_TOUCH_INFO(pointerInfo=pointerInfo,
touchFlags=TOUCH_FLAG_NONE,
touchMask=TOUCH_MASK_ALL,
rcContact=ctypes.wintypes.RECT(pointerInfo.ptPixelLocation.x-5,
pointerInfo.ptPixelLocation.y-5,
pointerInfo.ptPixelLocation.x+5,
pointerInfo.ptPixelLocation.y+5),
orientation=90,
pressure=32000)
if not "touchInfoLock" in globals():
touchInfoLock = thread.allocate_lock()
def setTouchCoords(touchInfo, x, y, fingerRadius=5):
touchInfo.pointerInfo.ptPixelLocation.x = x
touchInfo.pointerInfo.ptPixelLocation.y = y
touchInfo.rcContact.left = x - fingerRadius
touchInfo.rcContact.right = x + fingerRadius
touchInfo.rcContact.top = y - fingerRadius
touchInfo.rcContact.bottom = y + fingerRadius
def _sendTouch(pointerFlags, errorWhen="doTouch"):
touchInfo.pointerInfo.pointerFlags = pointerFlags
if (ctypes.windll.user32.InjectTouchInput(1, ctypes.byref(touchInfo)) == 0):
print "%s error: %s" % (errorWhen, ctypes.FormatError())
return False
else:
return True
def _touchHold():
# Keep updating previous touchDown or touchMove coordinates
# to avoid InjectTouchInput timeout
_touchUpdateInterval = 0.25 # seconds
while True:
time.sleep(_touchUpdateInterval)
touchInfoLock.acquire()
try:
if not (touchInfo.pointerInfo.pointerFlags & POINTER_FLAG_INCONTACT):
print "touch: no more contact"
break
if not _sendTouch(POINTER_FLAG_UPDATE |
POINTER_FLAG_INRANGE |
POINTER_FLAG_INCONTACT, "_touchHold"):
break
finally:
touchInfoLock.release()
def touchDown(x, y, fingerRadius=5):
touchInfoLock.acquire()
try:
setTouchCoords(touchInfo, x, y, fingerRadius)
ok = _sendTouch(POINTER_FLAG_DOWN |
POINTER_FLAG_INRANGE |
POINTER_FLAG_INCONTACT, "touchDown")
if ok:
thread.start_new_thread(_touchHold, ()) # update until raised
return ok
finally:
touchInfoLock.release()
def touchMove(x, y, fingerRadius=5):
touchInfoLock.acquire()
try:
setTouchCoords(touchInfo, x, y, fingerRadius)
return _sendTouch(POINTER_FLAG_UPDATE |
POINTER_FLAG_INRANGE |
POINTER_FLAG_INCONTACT, "touchMove")
finally:
touchInfoLock.release()
def touchUp(x, y, fingerRadius=5):
touchInfoLock.acquire()
try:
setTouchCoords(touchInfo, x, y, fingerRadius)
moveOk = _sendTouch(POINTER_FLAG_UPDATE |
POINTER_FLAG_INRANGE |
POINTER_FLAG_INCONTACT,
"touchUp move to final location")
return _sendTouch(POINTER_FLAG_UP, "touchUp") and moveOk
finally:
touchInfoLock.release()
def sendInput(*inputs):
nInputs = len(inputs)
LPINPUT = INPUT * nInputs
pInputs = LPINPUT(*inputs)
cbSize = ctypes.c_int(ctypes.sizeof(INPUT))
return ctypes.windll.user32.SendInput(nInputs, pInputs, cbSize)
def Input(structure):
if isinstance(structure, MOUSEINPUT):
return INPUT(INPUT_MOUSE, _INPUTunion(mi=structure))
if isinstance(structure, KEYBDINPUT):
return INPUT(INPUT_KEYBOARD, _INPUTunion(ki=structure))
if isinstance(structure, HARDWAREINPUT):
return INPUT(INPUT_HARDWARE, _INPUTunion(hi=structure))
raise TypeError('Cannot create INPUT structure!')
def MouseInput(flags, x, y, data):
return MOUSEINPUT(x, y, data, flags, 0, None)
def KeybdInput(code, flags):
return KEYBDINPUT(code, code, flags, 0, None)
def HardwareInput(message, parameter):
return HARDWAREINPUT(message & 0xFFFFFFFF,
parameter & 0xFFFF,
parameter >> 16 & 0xFFFF)
def Mouse(flags, x=0, y=0, data=0):
return Input(MouseInput(flags, x, y, data))
def Keyboard(code, flags=0):
return Input(KeybdInput(code, flags))
def Hardware(message, parameter=0):
return Input(HardwareInput(message, parameter))
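# Illustrative usage of the helpers above (an assumed example, not from the original file):
#   sendInput(Keyboard(VK_LWIN), Keyboard(VK_LWIN, KEYEVENTF_KEYUP))
# presses and releases the left Windows key in a single SendInput() call.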
################################################################################
UPPER = frozenset('~!@#$%^&*()_+QWERTYUIOP{}|ASDFGHJKL:"ZXCVBNM<>?')
LOWER = frozenset("`1234567890-=qwertyuiop[]\\asdfghjkl;'zxcvbnm,./")
ORDER = string.ascii_letters + string.digits + ' \b\r\t'
ALTER = dict(zip('!@#$%^&*()', '1234567890'))
OTHER = {'`': VK_OEM_3,
'~': VK_OEM_3,
'-': VK_OEM_MINUS,
'_': VK_OEM_MINUS,
'=': VK_OEM_PLUS,
'+': VK_OEM_PLUS,
'[': VK_OEM_4,
'{': VK_OEM_4,
']': VK_OEM_6,
'}': VK_OEM_6,
'\\': VK_OEM_5,
'|': VK_OEM_5,
';': VK_OEM_1,
':': VK_OEM_1,
"'": VK_OEM_7,
'"': VK_OEM_7,
',': VK_OEM_COMMA,
'<': VK_OEM_COMMA,
'.': VK_OEM_PERIOD,
'>': VK_OEM_PERIOD,
'/': VK_OEM_2,
'?': VK_OEM_2}
def pressKey(hexKeyCode):
event = Keyboard(hexKeyCode, 0)
return sendInput(event)
def releaseKey(hexKeyCode):
event = Keyboard(hexKeyCode, KEYEVENTF_KEYUP)
return sendInput(event)
def keyboardStream(string):
shiftPressed = False
for character in string.replace('\r\n', '\r').replace('\n', '\r'):
if shiftPressed and character in LOWER or not shiftPressed and character in UPPER:
yield Keyboard(VK_SHIFT, shiftPressed and KEYEVENTF_KEYUP)
shiftPressed = not shiftPressed
character = ALTER.get(character, character)
if character in ORDER:
code = ord(character.upper())
elif character in OTHER:
code = OTHER[character]
else:
# Unknown character: skip it.
continue
yield Keyboard(code)
yield Keyboard(code, KEYEVENTF_KEYUP)
if shiftPressed:
yield Keyboard(VK_SHIFT, KEYEVENTF_KEYUP)
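# Illustrative trace (derived from the generator above, assuming a US layout):
# keyboardStream("Hi") yields shift down, KEY_H down, KEY_H up, shift up,
# KEY_I down, KEY_I up; uppercase characters are typed with VK_SHIFT held.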
_g_lastWidth = None
_g_lastHeight = None
def zybgrSize():
"Return dimensions of most recently returned ZYBGR screenshot"
return _g_lastWidth, _g_lastHeight
def screenshotZYBGR(screenshotSize=(None, None)):
"Return width, height and zlib-compressed pixel data"
global _g_lastWidth, _g_lastHeight
width, height = screenshotSize
if width == None: # try autodetect
left = ctypes.windll.user32.GetSystemMetrics(SM_XVIRTUALSCREEN)
right = ctypes.windll.user32.GetSystemMetrics(SM_CXVIRTUALSCREEN)
width = right - left
else:
left = 0
if height == None:
top = ctypes.windll.user32.GetSystemMetrics(SM_YVIRTUALSCREEN)
bottom = ctypes.windll.user32.GetSystemMetrics(SM_CYVIRTUALSCREEN)
height = bottom - top
else:
top = 0
_g_lastWidth = width
_g_lastHeight = height
print "W x H ==", width, "X", height
# width = monitor['width']
# height = monitor['height']
# left = monitor['left']
# top = monitor['top']
SRCCOPY = 0xCC0020
DIB_RGB_COLORS = 0
srcdc = ctypes.windll.user32.GetDC(0)
memdc = ctypes.windll.gdi32.CreateCompatibleDC(srcdc)
bmp = ctypes.windll.gdi32.CreateCompatibleBitmap(srcdc, width, height)
ctypes.windll.gdi32.SelectObject(memdc, bmp)
ctypes.windll.gdi32.BitBlt(memdc, 0, 0, width, height, srcdc, left, top, SRCCOPY)
bmp_header = struct.pack('LHHHH', struct.calcsize('LHHHH'), width, height, 1, 24)
c_bmp_header = ctypes.c_buffer(bmp_header)
c_bits = ctypes.c_buffer(' ' * (height * ((width * 3 + 3) & -4)))
got_bits = ctypes.windll.gdi32.GetDIBits(memdc, bmp, 0, height,
c_bits, c_bmp_header, DIB_RGB_COLORS)
ctypes.windll.gdi32.DeleteObject(bmp)
ctypes.windll.gdi32.DeleteObject(memdc)
ctypes.windll.user32.ReleaseDC(0, srcdc)
return zlib.compress(c_bits.raw)
def sendType(text):
for event in keyboardStream(text):
sendInput(event)
time.sleep(0.05)
return True
def sendKey(keyname, modifiers):
'''
keyname can be either a single character or a constant defined in keyboard.py:
w.pressKey("a")
w.pressKey("KEY_A")
a modifier can be either a name such as VK_LWIN, as defined in keyboard.py,
or a raw hex keycode such as 0x5B:
w.pressKey("s", modifiers=["VK_LWIN"])
w.pressKey("KEY_A", modifiers=["VK_LSHIFT"])
'''
mods = 0
for m in modifiers:
print m
if "VK_" in str(m):
mods |= globals()[m]
else:
mods |= m
print sys._getframe().f_code.co_name, keyname, mods
if mods:
for m in modifiers:
if "VK_" in str(m):
pressKey(globals()[m])
else:
pressKey(m)
print "modifier down:", m
if len(keyname) == 1:
print "key down:", ord(keyname)
pressKey(ord(keyname.upper()))
time.sleep(0.1)
print "key up:", ord(keyname)
releaseKey(ord(keyname.upper()))
else:
print "key down:", globals()[keyname]
pressKey(globals()[keyname])
time.sleep(0.1)
print "key up:", globals()[keyname]
releaseKey(globals()[keyname])
if mods:
for m in modifiers:
if "VK_" in str(m):
releaseKey(globals()[m])
else:
releaseKey(m)
print "modifier up:", m
def sendKeyDown(keyname, modifiers):
for m in modifiers:
pressKey(m)
pressKey(keyname)
return True
def sendKeyUp(keyname, modifiers):
releaseKey(keyname)
for m in modifiers:
releaseKey(m)
return True
def sendClick(x, y, button=1):
print "sendClick", x, y
sendMouseMove(x, y, button)
sendMouseDown(button)
sendMouseUp(button)
return True
def sendTouchDown(x, y):
return touchDown(x, y)
def sendTouchUp(x, y):
return touchUp(x, y)
def sendTouchMove(x, y):
return touchMove(x, y)
def sendTap(x, y):
touchDown(x, y)
time.sleep(0.1)
touchUp(x, y)
return True
def sendMouseDown(button=1):
if button == 1:
flags = MOUSEEVENTF_LEFTDOWN
elif button == 2:
flags = MOUSEEVENTF_MIDDLEDOWN
elif button == 3:
flags = MOUSEEVENTF_RIGHTDOWN
else:
return False
event = Mouse(flags, 0, 0, 0)
return sendInput(event)
def sendMouseUp(button=1):
if button == 1:
flags = MOUSEEVENTF_LEFTUP
elif button == 2:
flags = MOUSEEVENTF_MIDDLEUP
elif button == 3:
flags = MOUSEEVENTF_RIGHTUP
else:
return False
event = Mouse(flags, 0, 0, 0)
return sendInput(event)
def sendMouseMove(x, y, button=1):
flags = MOUSEEVENTF_ABSOLUTE | MOUSEEVENTF_MOVE
x = x * 65535 /_mouse_input_area[0]
y = y * 65535 /_mouse_input_area[1]
event = Mouse(flags, x, y, 0)
return sendInput(event)
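# Note (illustrative): MOUSEEVENTF_ABSOLUTE expects coordinates normalized to 0..65535,
# so with _mouse_input_area == (1920, 1080), sendMouseMove(960, 540) maps to roughly
# (32767, 32767), i.e. the centre of the input area.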
def shell(command):
if isinstance(command, list):
useShell = False
else:
useShell = True
try:
output = subprocess.check_output(command, shell=useShell)
except subprocess.CalledProcessError, e:
if hasattr(e, "output"):
output = e.output
else:
output = None
return output
def shellSOE(command):
if isinstance(command, list):
useShell = False
else:
useShell = True
try:
p = subprocess.Popen(command, shell=useShell,
stdin = subprocess.PIPE,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE)
out, err = p.communicate()
status = p.returncode
except OSError:
status, out, err = None, None, None
return status, out, err
def launchHTTPD():
global _HTTPServerProcess
_HTTPServerProcess = subprocess.Popen("python -m SimpleHTTPServer 8000")
return True
def stopHTTPD():
print "stopping " + str(_HTTPServerProcess)
_HTTPServerProcess.terminate()
return True
def enum_display_monitors():
''' Get positions and handles of one or more monitors. '''
def _callback(monitor, dc, rect, data):
rct = rect.contents
infos = {}
infos['left'] = rct.left
infos['right'] = rct.right
infos['top'] = rct.top
infos['bottom'] = rct.bottom
infos['hmon'] = monitor
infos['hdc'] = dc
results.append(infos)
return 0
results = []
MonitorEnumProc = ctypes.WINFUNCTYPE(ctypes.c_int, ctypes.c_ulong, ctypes.c_ulong,
ctypes.POINTER(ctypes.wintypes.RECT), ctypes.c_double)
callback = MonitorEnumProc(_callback)
ctypes.windll.user32.EnumDisplayMonitors(0, 0, callback, 0)
return results
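# Editor's note (sketch, not in the original file): each entry returned by
# enum_display_monitors() is a dict of the form
# {'left': 0, 'top': 0, 'right': 1920, 'bottom': 1080, 'hmon': <handle>, 'hdc': <dc>},
# one per attached monitor; the coordinate values shown are just an example.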
if not "_g_monitors" in globals():
#_g_monitors = enum_display_monitors()
left = ctypes.windll.user32.GetSystemMetrics(SM_XVIRTUALSCREEN)
    right = ctypes.windll.user32.GetSystemMetrics(SM_CXVIRTUALSCREEN)
top = ctypes.windll.user32.GetSystemMetrics(SM_YVIRTUALSCREEN)
bottom = ctypes.windll.user32.GetSystemMetrics(SM_CYVIRTUALSCREEN)
width = right - left
height = bottom - top
_mouse_input_area = (width, height)
if not "_g_touchInjenctionInitialized" in globals():
if (ctypes.windll.user32.InitializeTouchInjection(1, 1) != 0):
print "Initialized Touch Injection"
_g_touchInjenctionInitialized = True
else:
print "InitializeTouchInjection failed"
if __name__ == '__main__':
start = time.time()
screenshot(0)
end = time.time()
print "total screenshot time:", end-start
|
goyalankit/po-compiler
|
refs/heads/master
|
object_files/networkx-1.8.1/build/lib.linux-i686-2.7/networkx/linalg/tests/test_graphmatrix.py
|
35
|
from nose import SkipTest
import networkx as nx
from networkx.generators.degree_seq import havel_hakimi_graph
class TestGraphMatrix(object):
numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test
@classmethod
def setupClass(cls):
global numpy
global assert_equal
global assert_almost_equal
try:
import numpy
from numpy.testing import assert_equal,assert_almost_equal
except ImportError:
raise SkipTest('NumPy not available.')
def setUp(self):
deg=[3,2,2,1,0]
self.G=havel_hakimi_graph(deg)
self.OI=numpy.array([[-1, -1, -1, 0],
[1, 0, 0, -1],
[0, 1, 0, 1],
[0, 0, 1, 0],
[0, 0, 0, 0]])
self.A=numpy.array([[0, 1, 1, 1, 0],
[1, 0, 1, 0, 0],
[1, 1, 0, 0, 0],
[1, 0, 0, 0, 0],
[0, 0, 0, 0, 0]])
self.WG=nx.Graph( (u,v,{'weight':0.5,'other':0.3})
for (u,v) in self.G.edges_iter() )
self.WG.add_node(4)
self.WA=numpy.array([[0 , 0.5, 0.5, 0.5, 0],
[0.5, 0 , 0.5, 0 , 0],
[0.5, 0.5, 0 , 0 , 0],
[0.5, 0 , 0 , 0 , 0],
[0 , 0 , 0 , 0 , 0]])
self.MG=nx.MultiGraph(self.G)
self.MG2=self.MG.copy()
self.MG2.add_edge(0,1)
self.MG2A=numpy.array([[0, 2, 1, 1, 0],
[2, 0, 1, 0, 0],
[1, 1, 0, 0, 0],
[1, 0, 0, 0, 0],
[0, 0, 0, 0, 0]])
self.MGOI=numpy.array([[-1, -1, -1, -1, 0],
[1, 1, 0, 0, -1],
[0, 0, 1, 0, 1],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 0]])
def test_incidence_matrix(self):
"Conversion to incidence matrix"
assert_equal(nx.incidence_matrix(self.G,oriented=True),self.OI)
assert_equal(nx.incidence_matrix(self.G),numpy.abs(self.OI))
assert_equal(nx.incidence_matrix(self.MG,oriented=True),self.OI)
assert_equal(nx.incidence_matrix(self.MG),numpy.abs(self.OI))
assert_equal(nx.incidence_matrix(self.MG2,oriented=True),self.MGOI)
assert_equal(nx.incidence_matrix(self.MG2),numpy.abs(self.MGOI))
assert_equal(nx.incidence_matrix(self.WG,oriented=True),self.OI)
assert_equal(nx.incidence_matrix(self.WG),numpy.abs(self.OI))
assert_equal(nx.incidence_matrix(self.WG,oriented=True,
weight='weight'),0.5*self.OI)
assert_equal(nx.incidence_matrix(self.WG,weight='weight'),
numpy.abs(0.5*self.OI))
assert_equal(nx.incidence_matrix(self.WG,oriented=True,weight='other'),
0.3*self.OI)
WMG=nx.MultiGraph(self.WG)
WMG.add_edge(0,1,attr_dict={'weight':0.5,'other':0.3})
assert_equal(nx.incidence_matrix(WMG,weight='weight'),
numpy.abs(0.5*self.MGOI))
assert_equal(nx.incidence_matrix(WMG,weight='weight',oriented=True),
0.5*self.MGOI)
assert_equal(nx.incidence_matrix(WMG,weight='other',oriented=True),
0.3*self.MGOI)
def test_adjacency_matrix(self):
"Conversion to adjacency matrix"
assert_equal(nx.adj_matrix(self.G),self.A)
assert_equal(nx.adj_matrix(self.MG),self.A)
assert_equal(nx.adj_matrix(self.MG2),self.MG2A)
assert_equal(nx.adj_matrix(self.G,nodelist=[0,1]),self.A[:2,:2])
assert_equal(nx.adj_matrix(self.WG),self.WA)
assert_equal(nx.adj_matrix(self.WG,weight=None),self.A)
assert_equal(nx.adj_matrix(self.MG2,weight=None),self.MG2A)
assert_equal(nx.adj_matrix(self.WG,weight='other'),0.6*self.WA)
|
XuesongYang/end2end_dialog
|
refs/heads/master
|
AgentActClassifyingModel.py
|
1
|
''' Description: System action prediction oracle model.
Inputs: binary vectors including slot_tags + user_intents
Output: multi-label agent actions
Author : Xuesong Yang
Email : xyang45@illinois.edu
Created Date: Dec. 31, 2016
'''
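# Editor's note (sketch, not in the original file): per the description above,
# each input sample is a binary matrix of shape
# (window_size, userTagIntent_vocab_size) and the target is a multi-hot vector
# of length agentAct_vocab_size, which is why _build() below ends in a Dense
# layer with a sigmoid activation trained with a binary_crossentropy loss.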
from DataSetCSVagentActPred import DataSetCSVagentActPred
from utils import print_params, checkExistence, getActPred, eval_intentPredict, eval_actPred
from keras.layers import Input, LSTM, Dense, Dropout, merge
from keras.models import Model
import os
import numpy as np
np.random.seed(1983)
def writeUtterActTxt(utter_txt, act_txt, fname):
with open(fname, 'wb') as f:
for line_utter, line_act in zip(utter_txt, act_txt):
line_lst = [token.replace('act-', '') for token in line_act.strip().split(';')]
f.write('{}\t{}\n'.format(line_utter, ';'.join(line_lst)))
class AgentActClassifying(object):
def __init__(self, **argparams):
self.train_data = argparams['train_data']
if self.train_data is not None:
assert isinstance(self.train_data, DataSetCSVagentActPred)
self.test_data = argparams['test_data']
if self.test_data is not None:
assert isinstance(self.test_data, DataSetCSVagentActPred)
self.dev_data = argparams['dev_data']
if self.dev_data is not None:
assert isinstance(self.dev_data, DataSetCSVagentActPred)
self.model_folder = argparams['model_folder']
if self.model_folder is None:
pid = argparams['pid']
self.model_folder = './model/agentAct_{}'.format(pid)
os.makedirs('{}/weights'.format(self.model_folder))
os.makedirs('{}/dev_results'.format(self.model_folder))
self.optimizer = argparams['optimizer']
self.epoch_nb = argparams['epoch_nb']
self.hidden_size = argparams['hidden_size']
self.dropout = argparams['dropout_ratio']
self.loss = argparams['loss']
self.patience = argparams['patience']
self.batch_size = argparams['batch_size']
self.threshold = argparams['threshold']
self.weights_fname = argparams['weights_fname']
self.params = argparams
def _build(self):
print('Building Graph ...')
inputs = Input(shape=(self.window_size, self.userTagIntent_vocab_size),
name='tagIntent_input')
lstm_forward = LSTM(output_dim=self.hidden_size,
return_sequences=False,
name='LSTM_forward')(inputs)
lstm_forward = Dropout(self.dropout)(lstm_forward)
lstm_backward = LSTM(output_dim=self.hidden_size,
return_sequences=False,
go_backwards=True,
name='LSTM_backward')(inputs)
lstm_backward = Dropout(self.dropout)(lstm_backward)
lstm_concat = merge([lstm_forward, lstm_backward],
mode='concat', concat_axis=-1,
name='merge_bidirections')
act_softmax = Dense(output_dim=self.agentAct_vocab_size,
activation='sigmoid')(lstm_concat)
self.model = Model(input=inputs, output=act_softmax)
self.model.compile(optimizer=self.optimizer,
loss='binary_crossentropy')
def train(self):
print('Training model ...')
# load params
self.window_size = self.train_data.window_size
self.userTagIntent_vocab_size = self.train_data.userTagIntent_vocab_size
self.agentAct_vocab_size = self.train_data.agentAct_vocab_size
self.id2agentAct = self.train_data.id2agentAct
other_npz = '{}/other_vars.npz'.format(self.model_folder)
train_vars = {'window_size': self.window_size,
'userTagIntent_vocab_size': self.userTagIntent_vocab_size,
'agentAct_vocab_size': self.agentAct_vocab_size,
'id2agentAct': self.id2agentAct}
np.savez_compressed(other_npz, **train_vars)
self.params['window_size'] = self.window_size
self.params['userTagIntent_vocab_size'] = self.userTagIntent_vocab_size
self.params['agentAct_vocab_size'] = self.agentAct_vocab_size
print_params(self.params)
# build model graph, save graph and plot graph
self._build()
self._plot_graph()
graph_yaml = '{}/graph-arch.yaml'.format(self.model_folder)
with open(graph_yaml, 'w') as fyaml:
fyaml.write(self.model.to_yaml())
# load train data
X_train = self.train_data.userTagIntent_vecBin
y_train = self.train_data.agentAct_vecBin
train_utter_txt = self.train_data.userUtter_txt
train_act_txt = self.train_data.agentAct_txt
train_fname = '{}/train.target'.format(self.model_folder)
writeUtterActTxt(train_utter_txt, train_act_txt, train_fname)
# load dev data
X_dev = self.dev_data.userTagIntent_vecBin
y_dev = self.dev_data.agentAct_vecBin
dev_utter_txt = self.dev_data.userUtter_txt
dev_act_txt = self.dev_data.agentAct_txt
dev_fname = '{}/dev.target'.format(self.model_folder)
writeUtterActTxt(dev_utter_txt, dev_act_txt, dev_fname)
for ep in xrange(self.epoch_nb):
print('<Epoch {}>'.format(ep))
self.model.fit(x=X_train, y=y_train, batch_size=self.batch_size, nb_epoch=1, verbose=2)
act_probs = self.model.predict(X_dev)
precision, recall, fscore, accuracy_frame, threshold = eval_intentPredict(act_probs, y_dev)
print('ep={}, precision={:.4f}, recall={:.4f}, fscore={:.4f}, accuracy_frame={:.4f}, threshold={:.4f}'.format(ep, precision, recall, fscore, accuracy_frame, threshold))
dev_pred_txt = getActPred(act_probs, threshold, self.id2agentAct)
dev_results_fname = '{}/dev_results/dev_ep={}.pred'.format(self.model_folder, ep)
writeUtterActTxt(dev_utter_txt, dev_pred_txt, dev_results_fname)
print('Write dev results: {}'.format(dev_results_fname))
weights_fname = '{}/weights/ep={}_f1={:.4f}_frameAcc={:.4f}_th={:.4f}.h5'.format(self.model_folder, ep, fscore, accuracy_frame, threshold)
print('Saving Model: {}'.format(weights_fname))
self.model.save_weights(weights_fname, overwrite=True)
def _plot_graph(self):
from keras.utils import visualize_util
graph_png = '{}/graph-plot.png'.format(self.model_folder)
visualize_util.plot(self.model,
to_file=graph_png,
show_shapes=True,
show_layer_names=True)
def predict(self):
print('Predicting ...')
result_folder = '{}/test_results'.format(self.model_folder)
if not os.path.exists(result_folder):
os.makedirs(result_folder)
probs_fname = '{}/actProb_{}.npz'.format(result_folder, os.path.basename(self.weights_fname).split('_')[0])
target_fname = '{}/act_test.target'.format(result_folder)
pred_fname = '{}/act_{}.pred'.format(result_folder, os.path.basename(self.weights_fname).split('_')[0])
print('\tact_probs={}'.format(probs_fname))
print('\tact_target={}'.format(target_fname))
print('\tact_pred={}'.format(pred_fname))
utter_txt = self.test_data.userUtter_txt
target_act = self.test_data.agentAct_txt
writeUtterActTxt(utter_txt, target_act, target_fname)
# prediction, save probs, and texts.
X_test = self.test_data.userTagIntent_vecBin
pred_probs = self.model.predict(X_test)
np.savez_compressed(probs_fname, probs=pred_probs)
pred_txt = getActPred(pred_probs, self.threshold, self.id2agentAct)
writeUtterActTxt(utter_txt, pred_txt, pred_fname)
# calculate performance scores
_, precision, recall, fscore, accuracy_frame = eval_actPred(pred_probs, self.test_data.agentAct_vecBin,
self.threshold)
print('AgentActPred: precision={:.4f}, recall={:.4f}, fscore={:.4f}, accuracy_frame={:.4f}'.format(precision, recall, fscore, accuracy_frame))
def load_model(self):
print('Loading model ...')
# check existence of params
assert os.path.exists(self.model_folder), 'model_folder is not found: {}'.format(self.model_folder)
assert self.threshold is not None, 'Argument required: --threshold'
assert self.weights_fname is not None, 'Argument required: --weights-file'
checkExistence(self.weights_fname)
model_graph = '{}/graph-arch.yaml'.format(self.model_folder)
model_train_vars = '{}/other_vars.npz'.format(self.model_folder)
checkExistence(model_graph)
checkExistence(model_train_vars)
# load models
from keras.models import model_from_yaml
with open(model_graph, 'r') as fgraph:
self.model = model_from_yaml(fgraph.read())
self.model.load_weights(self.weights_fname)
npzfile = np.load(model_train_vars)
self.agentAct_vocab_size = np.int32(npzfile['agentAct_vocab_size'][()])
self.userTagIntent_vocab_size = np.int32(npzfile['userTagIntent_vocab_size'][()])
self.id2agentAct = npzfile['id2agentAct'][()]
self.window_size = np.int32(npzfile['window_size'][()])
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--data-npz', dest='data_npz',
help='.npz file including instances of DataSetCSVagentAct class for train, dev and test')
parser.add_argument('--loss', dest='loss',
default='categorical_crossentropy',
help='loss function')
parser.add_argument('--optimizer', dest='optimizer',
default='adam', help='optimizer')
parser.add_argument('--epoch-nb', dest='epoch_nb', type=int,
                        default=300, help='number of epochs')
parser.add_argument('--patience', dest='patience', type=int,
default=10, help='patience for early stopping')
parser.add_argument('--hidden-size', dest='hidden_size', type=int,
default=64, help='the number of hidden units in recurrent layer')
parser.add_argument('--dropout-ratio', dest='dropout_ratio',
type=float, default=0.5, help='dropout ratio')
parser.add_argument('--model-folder', dest='model_folder',
help='the folder contains graph.yaml, weights.h5, and other_vars.npz, and results')
parser.add_argument('--batch-size', dest='batch_size',
type=int, default=32, help='batch size')
parser.add_argument('--test', dest='test_only', action='store_true',
help='only perform testing if this option is activated.')
parser.add_argument('--train', dest='train_only', action='store_true',
help='only perform training if this option is activated.')
parser.add_argument('--weights-file', dest='weights_fname', help='.h5 weights file.')
parser.add_argument('--threshold', dest='threshold', type=float, help='float number of threshold for multi-label prediction decision.')
args = parser.parse_args()
argparams = vars(args)
test_only = argparams['test_only']
train_only = argparams['train_only']
assert train_only or test_only, 'Arguments required: either --train or --test'
pid = os.getpid()
argparams['pid'] = pid
npz_fname = argparams['data_npz']
checkExistence(npz_fname)
data_npz = np.load(npz_fname)
if train_only: # train model
argparams['train_data'] = data_npz['train_data'][()]
argparams['dev_data'] = data_npz['dev_data'][()]
argparams['test_data'] = None
model = AgentActClassifying(**argparams)
model.train()
else:
# train_only is False, while test_only is True
# need to load model
argparams['train_data'] = None
argparams['dev_data'] = None
argparams['test_data'] = None
if argparams['model_folder'] is None:
raise Exception('Argument required: --model-folder')
model = AgentActClassifying(**argparams)
model.load_model()
# test
if test_only:
test_data = data_npz['test_data'][()]
model.test_data = test_data
model.predict()
|
sestrella/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/fortios/fortios_system_automation_trigger.py
|
13
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_system_automation_trigger
short_description: Trigger for automation stitches in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify system feature and automation_trigger category.
      Examples include all parameters and values which need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
system_automation_trigger:
description:
- Trigger for automation stitches.
default: null
type: dict
suboptions:
event_type:
description:
- Event type.
type: str
choices:
- ioc
- event-log
- reboot
- low-memory
- high-cpu
- license-near-expiry
- ha-failover
- config-change
- security-rating-summary
- virus-ips-db-updated
ioc_level:
description:
- IOC threat level.
type: str
choices:
- medium
- high
license_type:
description:
- License type.
type: str
choices:
- forticare-support
- fortiguard-webfilter
- fortiguard-antispam
- fortiguard-antivirus
- fortiguard-ips
- fortiguard-management
- forticloud
logid:
description:
- Log ID to trigger event.
type: int
name:
description:
- Name.
required: true
type: str
trigger_day:
description:
- Day within a month to trigger.
type: int
trigger_frequency:
description:
- Scheduled trigger frequency .
type: str
choices:
- hourly
- daily
- weekly
- monthly
trigger_hour:
description:
- Hour of the day on which to trigger (0 - 23).
type: int
trigger_minute:
description:
- Minute of the hour on which to trigger (0 - 59, 60 to randomize).
type: int
trigger_type:
description:
- Trigger type.
type: str
choices:
- event-based
- scheduled
trigger_weekday:
description:
- Day of week for trigger.
type: str
choices:
- sunday
- monday
- tuesday
- wednesday
- thursday
- friday
- saturday
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Trigger for automation stitches.
fortios_system_automation_trigger:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
system_automation_trigger:
event_type: "ioc"
ioc_level: "medium"
license_type: "forticare-support"
logid: "6"
name: "default_name_7"
trigger_day: "8"
trigger_frequency: "hourly"
trigger_hour: "10"
trigger_minute: "11"
trigger_type: "event-based"
trigger_weekday: "sunday"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_system_automation_trigger_data(json):
option_list = ['event_type', 'ioc_level', 'license_type',
'logid', 'name', 'trigger_day',
'trigger_frequency', 'trigger_hour', 'trigger_minute',
'trigger_type', 'trigger_weekday']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for elem in data:
elem = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
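# Editor's note (sketch, not part of the original module): underscore_to_hyphen()
# recursively rewrites dict keys so that Ansible-style option names match the
# FortiOS API, e.g. {'trigger_frequency': 'hourly'} -> {'trigger-frequency': 'hourly'}.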
def system_automation_trigger(data, fos):
vdom = data['vdom']
state = data['state']
system_automation_trigger_data = data['system_automation_trigger']
filtered_data = underscore_to_hyphen(filter_system_automation_trigger_data(system_automation_trigger_data))
if state == "present":
return fos.set('system',
'automation-trigger',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('system',
'automation-trigger',
mkey=filtered_data['name'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_system(data, fos):
if data['system_automation_trigger']:
resp = system_automation_trigger(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"system_automation_trigger": {
"required": False, "type": "dict", "default": None,
"options": {
"event_type": {"required": False, "type": "str",
"choices": ["ioc", "event-log", "reboot",
"low-memory", "high-cpu", "license-near-expiry",
"ha-failover", "config-change", "security-rating-summary",
"virus-ips-db-updated"]},
"ioc_level": {"required": False, "type": "str",
"choices": ["medium", "high"]},
"license_type": {"required": False, "type": "str",
"choices": ["forticare-support", "fortiguard-webfilter", "fortiguard-antispam",
"fortiguard-antivirus", "fortiguard-ips", "fortiguard-management",
"forticloud"]},
"logid": {"required": False, "type": "int"},
"name": {"required": True, "type": "str"},
"trigger_day": {"required": False, "type": "int"},
"trigger_frequency": {"required": False, "type": "str",
"choices": ["hourly", "daily", "weekly",
"monthly"]},
"trigger_hour": {"required": False, "type": "int"},
"trigger_minute": {"required": False, "type": "int"},
"trigger_type": {"required": False, "type": "str",
"choices": ["event-based", "scheduled"]},
"trigger_weekday": {"required": False, "type": "str",
"choices": ["sunday", "monday", "tuesday",
"wednesday", "thursday", "friday",
"saturday"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_system(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_system(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
hasadna/knesset-data-pipelines
|
refs/heads/master
|
datapackage_pipelines_knesset/processors/load_to_kv.py
|
2
|
import itertools
import copy
import logging
import os
import datapackage
from kvfile import PersistentKVFile
from datapackage_pipelines.wrapper import ingest, spew, get_dependency_datapackage_url
from datapackage_pipelines.utilities.resource_matcher import ResourceMatcher
from datapackage_pipelines.utilities.resources import tabular, PROP_STREAMING, \
PROP_STREAMED_FROM
def progress_logger(iter, log_progress_rows):
for i, row in enumerate(iter, 1):
yield row
if i % log_progress_rows == 0:
logging.info('loaded {} rows'.format(i))
def kv_res_iter(kv, resource=None, kv_key=None):
if resource:
logging.info('saving to kv')
yield from (row
for _, row in kv.insert(((kv_key.format(**row) if kv_key
else "{:08x}".format(i),
dict(row))
for i, row in enumerate(resource))))
else:
logging.info('reading from kv')
yield from (row for _, row in kv.items())
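# Editor's note (sketch, not part of the original processor): when a resource
# iterator is passed, kv_res_iter() stores each row under either the formatted
# kv_key or a zero-padded hex row index ("{:08x}".format(42) == '0000002a');
# without a resource it simply streams the previously stored rows back out.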
class ResourceLoader(object):
def __init__(self):
self.parameters, self.dp, self.res_iter = ingest()
def __call__(self):
self.parameters['resource'] = self.parameters['resource-name']
kv_cache = self.parameters.get('kv-cache', False)
kv_path = self.parameters['kv-path']
url = self.parameters['url']
limit_rows = self.parameters.get('limit-rows')
log_progress_rows = self.parameters.get('log-progress-rows')
dep_prefix = 'dependency://'
if url.startswith(dep_prefix):
dependency = url[len(dep_prefix):].strip()
url = get_dependency_datapackage_url(dependency)
assert url is not None, "Failed to fetch output datapackage for dependency '%s'" % dependency
stream = self.parameters.get('stream', True)
required = self.parameters.get('required', True)
resource = self.parameters.get('resource')
resources = self.parameters.get('resources')
if resource is not None:
assert not resources
resource_index = resource if isinstance(resource, int) else None
else:
assert resources
resource_index = None
resource = list(resources.keys())
name_matcher = ResourceMatcher(resource) if isinstance(resource, (str, list)) else None
selected_resources = []
found = False
try:
dp = datapackage.DataPackage(url)
except Exception:
if required:
raise
else:
dp = None
if dp:
dp = self.process_datapackage(dp)
for i, orig_res in enumerate(dp.resources):
if resource_index == i or \
(name_matcher is not None and name_matcher.match(orig_res.descriptor.get('name'))):
found = True
desc = copy.deepcopy(orig_res.descriptor)
if 'primaryKey' in desc.get('schema', {}):
# Avoid duplication checks
del orig_res.descriptor['schema']['primaryKey']
orig_res.commit()
desc[PROP_STREAMED_FROM] = orig_res.source
if resources:
desc.update(resources[desc['name']])
self.dp['resources'].append(desc)
if tabular(desc) and stream:
desc[PROP_STREAMING] = True
if kv_cache and os.path.exists(kv_path):
kv = PersistentKVFile(kv_path, concurrent=True)
orig_res_iter = kv_res_iter(kv, kv_key=self.parameters.get('kv-key'))
else:
kv = PersistentKVFile(kv_path, concurrent=True)
orig_res_iter = kv_res_iter(kv, orig_res.iter(keyed=True), kv_key=self.parameters.get('kv-key'))
if limit_rows:
orig_res_iter = itertools.islice(orig_res_iter, limit_rows)
if log_progress_rows:
orig_res_iter = progress_logger(orig_res_iter, log_progress_rows)
selected_resources.append(orig_res_iter)
else:
desc[PROP_STREAMING] = False
assert found or not required, "Failed to find resource with index or name matching %r" % resource
spew(self.dp, itertools.chain(self.res_iter, selected_resources))
def process_datapackage(self, dp_):
return dp_
if __name__ == '__main__':
ResourceLoader()()
|
cortedeltimo/SickRage
|
refs/heads/master
|
lib/lxml/html/soupparser.py
|
22
|
"""External interface to the BeautifulSoup HTML parser.
"""
__all__ = ["fromstring", "parse", "convert_tree"]
import re
from lxml import etree, html
try:
from bs4 import (
BeautifulSoup, Tag, Comment, ProcessingInstruction, NavigableString,
Declaration, Doctype)
_DECLARATION_OR_DOCTYPE = (Declaration, Doctype)
except ImportError:
from BeautifulSoup import (
BeautifulSoup, Tag, Comment, ProcessingInstruction, NavigableString,
Declaration)
_DECLARATION_OR_DOCTYPE = Declaration
def fromstring(data, beautifulsoup=None, makeelement=None, **bsargs):
"""Parse a string of HTML data into an Element tree using the
BeautifulSoup parser.
Returns the root ``<html>`` Element of the tree.
You can pass a different BeautifulSoup parser through the
    `beautifulsoup` keyword, and a different Element factory function
through the `makeelement` keyword. By default, the standard
``BeautifulSoup`` class and the default factory of `lxml.html` are
used.
"""
return _parse(data, beautifulsoup, makeelement, **bsargs)
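# Editor's note: a minimal usage sketch, not part of the original module. It
# assumes BeautifulSoup 4 with the stock 'html.parser' builder, as configured
# in _parse() below, and is wrapped in a function so nothing runs on import.
def _fromstring_example():
    root = fromstring("<p>Hello <b>world</b></p>")
    return root.tag  # 'html' -- fragments are wrapped in an <html> root element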
def parse(file, beautifulsoup=None, makeelement=None, **bsargs):
"""Parse a file into an ElemenTree using the BeautifulSoup parser.
You can pass a different BeautifulSoup parser through the
    `beautifulsoup` keyword, and a different Element factory function
through the `makeelement` keyword. By default, the standard
``BeautifulSoup`` class and the default factory of `lxml.html` are
used.
"""
if not hasattr(file, 'read'):
file = open(file)
root = _parse(file, beautifulsoup, makeelement, **bsargs)
return etree.ElementTree(root)
def convert_tree(beautiful_soup_tree, makeelement=None):
"""Convert a BeautifulSoup tree to a list of Element trees.
Returns a list instead of a single root Element to support
HTML-like soup with more than one root element.
You can pass a different Element factory through the `makeelement`
keyword.
"""
root = _convert_tree(beautiful_soup_tree, makeelement)
children = root.getchildren()
for child in children:
root.remove(child)
return children
# helpers
def _parse(source, beautifulsoup, makeelement, **bsargs):
if beautifulsoup is None:
beautifulsoup = BeautifulSoup
if hasattr(beautifulsoup, "HTML_ENTITIES"): # bs3
if 'convertEntities' not in bsargs:
bsargs['convertEntities'] = 'html'
if hasattr(beautifulsoup, "DEFAULT_BUILDER_FEATURES"): # bs4
if 'features' not in bsargs:
bsargs['features'] = 'html.parser' # use Python html parser
tree = beautifulsoup(source, **bsargs)
root = _convert_tree(tree, makeelement)
# from ET: wrap the document in a html root element, if necessary
if len(root) == 1 and root[0].tag == "html":
return root[0]
root.tag = "html"
return root
_parse_doctype_declaration = re.compile(
r'(?:\s|[<!])*DOCTYPE\s*HTML'
r'(?:\s+PUBLIC)?(?:\s+(\'[^\']*\'|"[^"]*"))?'
r'(?:\s+(\'[^\']*\'|"[^"]*"))?',
re.IGNORECASE).match
class _PseudoTag:
# Minimal imitation of BeautifulSoup.Tag
def __init__(self, contents):
self.name = 'html'
self.attrs = []
self.contents = contents
def __iter__(self):
return self.contents.__iter__()
def _convert_tree(beautiful_soup_tree, makeelement):
if makeelement is None:
makeelement = html.html_parser.makeelement
# Split the tree into three parts:
# i) everything before the root element: document type
# declaration, comments, processing instructions, whitespace
# ii) the root(s),
# iii) everything after the root: comments, processing
# instructions, whitespace
first_element_idx = last_element_idx = None
html_root = declaration = None
for i, e in enumerate(beautiful_soup_tree):
if isinstance(e, Tag):
if first_element_idx is None:
first_element_idx = i
last_element_idx = i
if html_root is None and e.name and e.name.lower() == 'html':
html_root = e
elif declaration is None and isinstance(e, _DECLARATION_OR_DOCTYPE):
declaration = e
# For a nice, well-formatted document, the variable roots below is
# a list consisting of a single <html> element. However, the document
# may be a soup like '<meta><head><title>Hello</head><body>Hi
# all<\p>'. In this example roots is a list containing meta, head
# and body elements.
if first_element_idx is None:
pre_root = post_root = []
roots = beautiful_soup_tree.contents
else:
pre_root = beautiful_soup_tree.contents[:first_element_idx]
roots = beautiful_soup_tree.contents[first_element_idx:last_element_idx+1]
post_root = beautiful_soup_tree.contents[last_element_idx+1:]
# Reorganize so that there is one <html> root...
if html_root is not None:
# ... use existing one if possible, ...
i = roots.index(html_root)
html_root.contents = roots[:i] + html_root.contents + roots[i+1:]
else:
# ... otherwise create a new one.
html_root = _PseudoTag(roots)
convert_node = _init_node_converters(makeelement)
# Process pre_root
res_root = convert_node(html_root)
prev = res_root
for e in reversed(pre_root):
converted = convert_node(e)
if converted is not None:
prev.addprevious(converted)
prev = converted
# ditto for post_root
prev = res_root
for e in post_root:
converted = convert_node(e)
if converted is not None:
prev.addnext(converted)
prev = converted
if declaration is not None:
try:
# bs4 provides full Doctype string
doctype_string = declaration.output_ready()
except AttributeError:
doctype_string = declaration.string
match = _parse_doctype_declaration(doctype_string)
if not match:
# Something is wrong if we end up in here. Since soupparser should
# tolerate errors, do not raise Exception, just let it pass.
pass
else:
external_id, sys_uri = match.groups()
docinfo = res_root.getroottree().docinfo
# strip quotes and update DOCTYPE values (any of None, '', '...')
docinfo.public_id = external_id and external_id[1:-1]
docinfo.system_url = sys_uri and sys_uri[1:-1]
return res_root
def _init_node_converters(makeelement):
converters = {}
ordered_node_types = []
def converter(*types):
def add(handler):
for t in types:
converters[t] = handler
ordered_node_types.append(t)
return handler
return add
def find_best_converter(node):
for t in ordered_node_types:
if isinstance(node, t):
return converters[t]
return None
def convert_node(bs_node, parent=None):
# duplicated in convert_tag() below
try:
handler = converters[type(bs_node)]
except KeyError:
handler = converters[type(bs_node)] = find_best_converter(bs_node)
if handler is None:
return None
return handler(bs_node, parent)
def map_attrs(bs_attrs):
if isinstance(bs_attrs, dict): # bs4
attribs = {}
for k, v in bs_attrs.items():
if isinstance(v, list):
v = " ".join(v)
attribs[k] = unescape(v)
else:
attribs = dict((k, unescape(v)) for k, v in bs_attrs)
return attribs
def append_text(parent, text):
if len(parent) == 0:
parent.text = (parent.text or '') + text
else:
parent[-1].tail = (parent[-1].tail or '') + text
# converters are tried in order of their definition
@converter(Tag, _PseudoTag)
def convert_tag(bs_node, parent):
attrs = bs_node.attrs
if parent is not None:
attribs = map_attrs(attrs) if attrs else None
res = etree.SubElement(parent, bs_node.name, attrib=attribs)
else:
attribs = map_attrs(attrs) if attrs else {}
res = makeelement(bs_node.name, attrib=attribs)
for child in bs_node:
# avoid double recursion by inlining convert_node(), see above
try:
handler = converters[type(child)]
except KeyError:
pass
else:
if handler is not None:
handler(child, res)
continue
convert_node(child, res)
return res
@converter(Comment)
def convert_comment(bs_node, parent):
res = html.HtmlComment(bs_node)
if parent is not None:
parent.append(res)
return res
@converter(ProcessingInstruction)
def convert_pi(bs_node, parent):
if bs_node.endswith('?'):
# The PI is of XML style (<?as df?>) but BeautifulSoup
# interpreted it as being SGML style (<?as df>). Fix.
bs_node = bs_node[:-1]
res = etree.ProcessingInstruction(*bs_node.split(' ', 1))
if parent is not None:
parent.append(res)
return res
@converter(NavigableString)
def convert_text(bs_node, parent):
if parent is not None:
append_text(parent, unescape(bs_node))
return None
return convert_node
# copied from ET's ElementSoup
try:
from html.entities import name2codepoint # Python 3
except ImportError:
from htmlentitydefs import name2codepoint
handle_entities = re.compile(r"&(\w+);").sub
try:
unichr
except NameError:
# Python 3
unichr = chr
def unescape(string):
if not string:
return ''
# work around oddities in BeautifulSoup's entity handling
def unescape_entity(m):
try:
return unichr(name2codepoint[m.group(1)])
except KeyError:
return m.group(0) # use as is
return handle_entities(unescape_entity, string)
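# Editor's note (sketch, not part of the original module): unescape() resolves
# named character references left in text nodes by BeautifulSoup, e.g.
# unescape("fish &amp; chips") returns "fish & chips", while unknown entities
# such as "&bogus;" are passed through unchanged.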
|
potash/scikit-learn
|
refs/heads/master
|
sklearn/ensemble/tests/test_voting_classifier.py
|
21
|
"""Testing for the VotingClassifier"""
import numpy as np
from sklearn.utils.testing import assert_almost_equal, assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.exceptions import NotFittedError
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.model_selection import GridSearchCV
from sklearn import datasets
from sklearn.model_selection import cross_val_score
from sklearn.datasets import make_multilabel_classification
from sklearn.svm import SVC
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
X, y = iris.data[:, 1:3], iris.target
def test_estimator_init():
eclf = VotingClassifier(estimators=[])
msg = ('Invalid `estimators` attribute, `estimators` should be'
' a list of (string, estimator) tuples')
assert_raise_message(AttributeError, msg, eclf.fit, X, y)
clf = LogisticRegression(random_state=1)
eclf = VotingClassifier(estimators=[('lr', clf)], voting='error')
msg = ('Voting must be \'soft\' or \'hard\'; got (voting=\'error\')')
assert_raise_message(ValueError, msg, eclf.fit, X, y)
eclf = VotingClassifier(estimators=[('lr', clf)], weights=[1, 2])
msg = ('Number of classifiers and weights must be equal'
'; got 2 weights, 1 estimators')
assert_raise_message(ValueError, msg, eclf.fit, X, y)
def test_predictproba_hardvoting():
eclf = VotingClassifier(estimators=[('lr1', LogisticRegression()),
('lr2', LogisticRegression())],
voting='hard')
msg = "predict_proba is not available when voting='hard'"
assert_raise_message(AttributeError, msg, eclf.predict_proba, X)
def test_notfitted():
eclf = VotingClassifier(estimators=[('lr1', LogisticRegression()),
('lr2', LogisticRegression())],
voting='soft')
msg = ("This VotingClassifier instance is not fitted yet. Call \'fit\'"
" with appropriate arguments before using this method.")
assert_raise_message(NotFittedError, msg, eclf.predict_proba, X)
def test_majority_label_iris():
"""Check classification by majority label on dataset iris."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard')
scores = cross_val_score(eclf, X, y, cv=5, scoring='accuracy')
assert_almost_equal(scores.mean(), 0.95, decimal=2)
def test_tie_situation():
"""Check voting classifier selects smaller class label in tie situation."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2)],
voting='hard')
assert_equal(clf1.fit(X, y).predict(X)[73], 2)
assert_equal(clf2.fit(X, y).predict(X)[73], 1)
assert_equal(eclf.fit(X, y).predict(X)[73], 1)
def test_weights_iris():
"""Check classification by average probabilities on dataset iris."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 2, 10])
scores = cross_val_score(eclf, X, y, cv=5, scoring='accuracy')
assert_almost_equal(scores.mean(), 0.93, decimal=2)
def test_predict_on_toy_problem():
"""Manually check predicted class labels for toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5],
[-1.2, -1.4],
[-3.4, -2.2],
[1.1, 1.2],
[2.1, 1.4],
[3.1, 2.3]])
y = np.array([1, 1, 1, 2, 2, 2])
    assert_array_equal(clf1.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2])
    assert_array_equal(clf2.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2])
    assert_array_equal(clf3.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2])
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard',
weights=[1, 1, 1])
    assert_array_equal(eclf.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2])
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 1, 1])
    assert_array_equal(eclf.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2])
def test_predict_proba_on_toy_problem():
"""Calculate predicted probabilities on toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
clf1_res = np.array([[0.59790391, 0.40209609],
[0.57622162, 0.42377838],
[0.50728456, 0.49271544],
[0.40241774, 0.59758226]])
clf2_res = np.array([[0.8, 0.2],
[0.8, 0.2],
[0.2, 0.8],
[0.3, 0.7]])
clf3_res = np.array([[0.9985082, 0.0014918],
[0.99845843, 0.00154157],
[0., 1.],
[0., 1.]])
t00 = (2*clf1_res[0][0] + clf2_res[0][0] + clf3_res[0][0]) / 4
t11 = (2*clf1_res[1][1] + clf2_res[1][1] + clf3_res[1][1]) / 4
t21 = (2*clf1_res[2][1] + clf2_res[2][1] + clf3_res[2][1]) / 4
t31 = (2*clf1_res[3][1] + clf2_res[3][1] + clf3_res[3][1]) / 4
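    # Editor's note: with voting='soft' and weights=[2, 1, 1], the ensemble
    # probability is the weighted mean sum(w_i * p_i) / sum(w_i); the
    # denominator 4 in t00..t31 above is simply 2 + 1 + 1.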
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[2, 1, 1])
eclf_res = eclf.fit(X, y).predict_proba(X)
assert_almost_equal(t00, eclf_res[0][0], decimal=1)
assert_almost_equal(t11, eclf_res[1][1], decimal=1)
assert_almost_equal(t21, eclf_res[2][1], decimal=1)
assert_almost_equal(t31, eclf_res[3][1], decimal=1)
try:
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard')
eclf.fit(X, y).predict_proba(X)
except AttributeError:
pass
else:
raise AssertionError('AttributeError for voting == "hard"'
' and with predict_proba not raised')
def test_multilabel():
"""Check if error is raised for multilabel classification."""
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
random_state=123)
clf = OneVsRestClassifier(SVC(kernel='linear'))
eclf = VotingClassifier(estimators=[('ovr', clf)], voting='hard')
try:
eclf.fit(X, y)
except NotImplementedError:
return
def test_gridsearch():
"""Check GridSearch support."""
clf1 = LogisticRegression(random_state=1)
clf2 = RandomForestClassifier(random_state=1)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft')
params = {'lr__C': [1.0, 100.0],
'voting': ['soft', 'hard'],
'weights': [[0.5, 0.5, 0.5], [1.0, 0.5, 0.5]]}
grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5)
grid.fit(iris.data, iris.target)
def test_parallel_predict():
"""Check parallel backend of VotingClassifier on toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
eclf1 = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
n_jobs=1).fit(X, y)
eclf2 = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
n_jobs=2).fit(X, y)
assert_array_equal(eclf1.predict(X), eclf2.predict(X))
assert_array_equal(eclf1.predict_proba(X), eclf2.predict_proba(X))
def test_sample_weight():
"""Tests sample_weight parameter of VotingClassifier"""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = SVC(probability=True, random_state=123)
eclf1 = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('svc', clf3)],
voting='soft').fit(X, y, sample_weight=np.ones((len(y),)))
eclf2 = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('svc', clf3)],
voting='soft').fit(X, y)
assert_array_equal(eclf1.predict(X), eclf2.predict(X))
assert_array_equal(eclf1.predict_proba(X), eclf2.predict_proba(X))
sample_weight = np.random.RandomState(123).uniform(size=(len(y),))
eclf3 = VotingClassifier(estimators=[('lr', clf1)], voting='soft')
eclf3.fit(X, y, sample_weight)
clf1.fit(X, y, sample_weight)
assert_array_equal(eclf3.predict(X), clf1.predict(X))
assert_array_equal(eclf3.predict_proba(X), clf1.predict_proba(X))
clf4 = KNeighborsClassifier()
eclf3 = VotingClassifier(estimators=[
('lr', clf1), ('svc', clf3), ('knn', clf4)],
voting='soft')
msg = ('Underlying estimator \'knn\' does not support sample weights.')
assert_raise_message(ValueError, msg, eclf3.fit, X, y, sample_weight)
|
mikelarre/odoomrp-wip-1
|
refs/heads/8.0
|
task_delegation_wizard/__init__.py
|
29
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from . import models
from . import wizard
|
Heroes-Academy/DataStructures_Winter2017
|
refs/heads/master
|
code/solutions/resursive_solution.py
|
2
|
"""
This file is the solution to the recursive problems assigned in class.
There are comments below to explain things.
"""
def recursive_palindrome(number):
number_str = str(number)
if len(number_str) == 1:
# in this case, there is only one thing left. return True
return True
elif number_str[0] == number_str[-1]:
### you can index a string like a list
### and indexing using -1 gets you the last item
### in this case, the number string is a palindrome so far
### so, let's cut off the ends and recurse on it
### this says, "Start at 1, go until -1"
new_number_str = number_str[1:-1]
### just in case, check for 0 length (aka, our input string was only 2 long)
### then, we return True anyway
if len(new_number_str) == 0:
return True
else:
new_number = int(new_number_str)
return recursive_palindrome(new_number)
else:
# this is the catch all condition. it's false here.
return False
def test_one():
assert recursive_palindrome(1000) == False
    assert recursive_palindrome(110111011) == True
assert recursive_palindrome(856743347658) == True
def hailstone_number(number):
if number == 1:
        ### this is the base case
return [number]
elif number % 2 == 0:
### this is the first recursive case
### here I am putting the number into an array by itself
### and then, I add the result of the recursion to it
### this works because I know the function call will always return another array
### and you can add two arrays together to form a larger array
return [number] + hailstone_number(number//2)
else:
### the other recursive case.
return [number] + hailstone_number(3*number+1)
def test_two():
assert hailstone_number(8) == [8, 4, 2, 1]
assert hailstone_number(9) == [9, 28, 14, 7, 22, 11, 34, 17, 52, 26, 13, 40, 20, 10, 5, 16, 8, 4, 2, 1]
assert hailstone_number(3) == [3, 10, 5, 16, 8, 4, 2, 1]
def find_minimum_v1(a_list):
if len(a_list) == 1:
### our base case, return the single number
return a_list[0]
else:
        ### our recursive case.
### get the first number
first = a_list[0]
### find the minimum of the rest of the list
second = find_minimum_v1(a_list[1:])
return min(first, second)
def find_minimum_v2(a_list):
if len(a_list) == 1:
### our first base case, return the single number
return a_list[0]
elif len(a_list) == 2:
### our other base case, return the min of the two numbers
return min(a_list[0], a_list[1])
else:
### find the minimum of either half and return it
half = len(a_list) // 2
first = find_minimum_v2(a_list[:half])
second = find_minimum_v2(a_list[half:])
return min(first, second)
def test_three():
a_list = [1,2,3,4,817,83,-10,-188]
assert find_minimum_v1(a_list) == -188
assert find_minimum_v2(a_list) == -188
try:
test_one()
except AssertionError as e:
print("=========================")
print("Test one failed.")
print("=========================\n")
raise e
try:
test_two()
except AssertionError as e:
print("=========================")
print("Test two failed; {}".format(e))
print("=========================\n")
raise e
try:
test_three()
except AssertionError as e:
print("=========================")
print("Test three failed; {}".format(e))
print("=========================\n")
raise e
|
paulklemm/interaction-effects
|
refs/heads/master
|
js-html/tests/bower_components/dat-gui/utils/build.py
|
29
|
#!/usr/bin/env python
from optparse import OptionParser
import httplib, urllib
import os, fnmatch, shutil, re
usage = """usage: %prog [options] command
Commands:
build build the script
debug print the header to include js files
clean remove any built files
"""
parser = OptionParser(usage=usage)
parser.add_option('-l', '--level', dest='level', default='SIMPLE_OPTIMIZATIONS',
help='Closure compilation level [WHITESPACE_ONLY, SIMPLE_OPTIMIZATIONS, \
ADVANCED_OPTIMIZATIONS]')
UTILS = os.path.dirname(os.path.relpath(__file__))
PREFIX = os.path.join(UTILS,'..')
SRC_ROOT = os.path.join(PREFIX,'src')
BUILD_ROOT = os.path.join(PREFIX,'build')
INDEX = os.path.join(PREFIX,'index.html')
BUILD_NAME = 'DAT.GUI'
ALL_JS = ['DAT.GUI.js','DAT.GUI']
LICENSE = """/**
* dat.gui Javascript Controller Library
* http://dataarts.github.com/dat.gui
*
* Copyright 2011 Data Arts Team, Google Creative Lab
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*/
"""
def flatten(l, ltypes=(list, tuple)):
ltype = type(l)
l = list(l)
i = 0
while i < len(l):
while isinstance(l[i], ltypes):
if not l[i]:
l.pop(i)
i -= 1
break
else:
l[i:i + 1] = l[i]
i += 1
return ltype(l)
def expand(path, globby):
matches = []
path = path.split('.')
path.insert(0,SRC_ROOT)
filename = "%s.%s"%(path[-2],path[-1])
if fnmatch.fnmatch(filename, globby):
tmppath = os.path.join(*(path[:-1]+[filename]))
if os.path.exists(tmppath):
path[-1] = filename
else:
path = path[:-2]+[filename]
path = os.path.join(*path)
if os.path.isdir(path):
for root, dirnames, filenames in os.walk(path):
for filename in fnmatch.filter(filenames, globby):
matches.append(os.path.join(root, filename))
else:
matches.append(path)
return matches
def unique(seq, idfun=None):
"""Ordered uniquify function
if in 2.7 use:
OrderedDict.fromkeys(seq).keys()
"""
if idfun is None:
def idfun(x): return x
seen = {}
result = []
for item in seq:
marker = idfun(item)
if marker in seen: continue
seen[marker] = 1
result.append(item)
return result
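# Editor's note (sketch, not in the original script): unique() keeps the first
# occurrence of each item while preserving order, e.g.
# unique([3, 1, 3, 2, 1]) -> [3, 1, 2].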
def source_list(src, globby='*.js'):
def expander(f):
return expand(f,globby)
return unique(flatten(map(expander, src)))
def compile(code):
params = urllib.urlencode([
('js_code', code),
('compilation_level', options.level),
('output_format', 'text'),
('output_info', 'compiled_code'),
])
headers = { 'Content-type': 'application/x-www-form-urlencoded' }
conn = httplib.HTTPConnection('closure-compiler.appspot.com')
conn.request('POST', '/compile', params, headers)
response = conn.getresponse()
data = response.read()
conn.close()
return data
def bytes_to_kb(b,digits=1):
return round(0.0009765625 * b, digits)
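# Editor's note (sketch, not in the original script): bytes_to_kb() multiplies
# by 1/1024 (0.0009765625), so bytes_to_kb(2048) == 2.0 and
# bytes_to_kb(1536, 2) == 1.5.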
def clean():
if os.path.exists(BUILD_ROOT):
shutil.rmtree(BUILD_ROOT)
print('DONE. Removed %s'%(BUILD_ROOT,))
else:
print('DONE. Nothing to clean')
def build(jssrc, csssrc=list([''])):
if not os.path.exists(BUILD_ROOT):
os.makedirs(BUILD_ROOT)
if csssrc:
cssfiles = source_list(csssrc, '*.css')
print('CSS files being compiled: ', cssfiles)
css = '\n'.join([open(f).read() for f in cssfiles])
css = re.sub(r'[ \t\n\r]+',' ',css)
jsfiles = source_list(jssrc, '*.js')
print('JS files being compiled: ', jsfiles)
code = '\n'.join([open(f).read() for f in jsfiles])
if csssrc:
code += """DAT.GUI.inlineCSS = '%s';\n"""%(css,)
outpath = os.path.join(BUILD_ROOT, BUILD_NAME+'.js')
with open(outpath,'w') as f:
f.write(LICENSE)
f.write(code)
compiled = compile(code)
outpathmin = os.path.join(BUILD_ROOT, BUILD_NAME+'.min.js')
with open(outpathmin,'w') as f:
f.write(LICENSE)
f.write(compiled)
size = bytes_to_kb(os.path.getsize(outpath))
sizemin = bytes_to_kb(os.path.getsize(outpathmin))
with open(INDEX,'r') as f:
index = f.read()
with open(INDEX,'w') as f:
index = re.sub(r'<small id=\'buildsize\'>\[[0-9.]+kb\]','<small id=\'buildsize\'>[%skb]'%(size,),index)
index = re.sub(r'<small id=\'buildsizemin\'>\[[0-9.]+kb\]','<small id=\'buildsizemin\'>[%skb]'%(sizemin,),index)
f.write(index)
print('DONE. Built files in %s.'%(BUILD_ROOT,))
def debug(jssrc, csssrc=list([''])):
head = ""
files = source_list(csssrc, '*.css')
for f in files:
f = f.replace(PREFIX+'/','')
head += '<link href="%s" media="screen" rel="stylesheet" type="text/css"/>\n'%(f,)
files = source_list(jssrc, '*.js')
for f in files:
f = f.replace(PREFIX+'/','')
head += '<script type="text/javascript" src="%s"></script>\n'%(f,)
print(head)
if __name__ == '__main__':
global options
(options, args) = parser.parse_args()
if len(args) != 1:
print(parser.usage)
exit(0)
command = args[0]
if command == 'build':
build(ALL_JS)
elif command == 'clean':
clean()
elif command == 'debug':
debug(ALL_JS)
|
fnouama/intellij-community
|
refs/heads/master
|
python/lib/Lib/site-packages/django/db/models/sql/query.py
|
72
|
"""
Create SQL statements for QuerySets.
The code in here encapsulates all of the SQL construction so that QuerySets
themselves do not have to (and could be backed by things other than SQL
databases). The abstraction barrier only works one way: this module has to know
all about the internals of models in order to get the information it needs.
"""
from django.utils.copycompat import deepcopy
from django.utils.tree import Node
from django.utils.datastructures import SortedDict
from django.utils.encoding import force_unicode
from django.db import connections, DEFAULT_DB_ALIAS
from django.db.models import signals
from django.db.models.fields import FieldDoesNotExist
from django.db.models.query_utils import select_related_descend, InvalidQuery
from django.db.models.sql import aggregates as base_aggregates_module
from django.db.models.sql.constants import *
from django.db.models.sql.datastructures import EmptyResultSet, Empty, MultiJoin
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.sql.where import (WhereNode, Constraint, EverythingNode,
ExtraWhere, AND, OR)
from django.core.exceptions import FieldError
__all__ = ['Query', 'RawQuery']
class RawQuery(object):
"""
A single raw SQL query
"""
def __init__(self, sql, using, params=None):
self.validate_sql(sql)
self.params = params or ()
self.sql = sql
self.using = using
self.cursor = None
# Mirror some properties of a normal query so that
# the compiler can be used to process results.
self.low_mark, self.high_mark = 0, None # Used for offset/limit
self.extra_select = {}
self.aggregate_select = {}
def clone(self, using):
return RawQuery(self.sql, using, params=self.params)
def convert_values(self, value, field, connection):
"""Convert the database-returned value into a type that is consistent
across database backends.
By default, this defers to the underlying backend operations, but
it can be overridden by Query classes for specific backends.
"""
return connection.ops.convert_values(value, field)
def get_columns(self):
if self.cursor is None:
self._execute_query()
converter = connections[self.using].introspection.table_name_converter
return [converter(column_meta[0])
for column_meta in self.cursor.description]
def validate_sql(self, sql):
if not sql.lower().strip().startswith('select'):
raise InvalidQuery('Raw queries are limited to SELECT queries. Use '
'connection.cursor directly for other types of queries.')
def __iter__(self):
# Always execute a new query for a new iterator.
# This could be optimized with a cache at the expense of RAM.
self._execute_query()
if not connections[self.using].features.can_use_chunked_reads:
# If the database can't use chunked reads we need to make sure we
# evaluate the entire query up front.
result = list(self.cursor)
else:
result = self.cursor
return iter(result)
def __repr__(self):
return "<RawQuery: %r>" % (self.sql % self.params)
def _execute_query(self):
self.cursor = connections[self.using].cursor()
self.cursor.execute(self.sql, self.params)
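# Illustrative sketch, not part of Django's source: RawQuery is normally driven
# via Manager.raw(), but used directly it behaves roughly as follows, assuming a
# configured 'default' database alias and a hypothetical myapp_author table:
#   q = RawQuery("SELECT id, name FROM myapp_author", using="default")
#   q.get_columns()          # e.g. ['id', 'name']; executes the query lazily
#   for row in q: ...        # each new iterator re-executes and walks the cursor
# Non-SELECT statements are rejected up front by validate_sql().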
class Query(object):
"""
A single SQL query.
"""
# SQL join types. These are part of the class because their string forms
# vary from database to database and can be customised by a subclass.
INNER = 'INNER JOIN'
LOUTER = 'LEFT OUTER JOIN'
alias_prefix = 'T'
query_terms = QUERY_TERMS
aggregates_module = base_aggregates_module
compiler = 'SQLCompiler'
def __init__(self, model, where=WhereNode):
self.model = model
self.alias_refcount = {}
self.alias_map = {} # Maps alias to join information
self.table_map = {} # Maps table names to list of aliases.
self.join_map = {}
self.rev_join_map = {} # Reverse of join_map.
self.quote_cache = {}
self.default_cols = True
self.default_ordering = True
self.standard_ordering = True
self.ordering_aliases = []
self.select_fields = []
self.related_select_fields = []
self.dupe_avoidance = {}
self.used_aliases = set()
self.filter_is_sticky = False
self.included_inherited_models = {}
# SQL-related attributes
self.select = []
self.tables = [] # Aliases in the order they are created.
self.where = where()
self.where_class = where
self.group_by = None
self.having = where()
self.order_by = []
self.low_mark, self.high_mark = 0, None # Used for offset/limit
self.distinct = False
self.select_related = False
self.related_select_cols = []
# SQL aggregate-related attributes
self.aggregates = SortedDict() # Maps alias -> SQL aggregate function
self.aggregate_select_mask = None
self._aggregate_select_cache = None
# Arbitrary maximum limit for select_related. Prevents infinite
# recursion. Can be changed by the depth parameter to select_related().
self.max_depth = 5
# These are for extensions. The contents are more or less appended
# verbatim to the appropriate clause.
self.extra = SortedDict() # Maps col_alias -> (col_sql, params).
self.extra_select_mask = None
self._extra_select_cache = None
self.extra_tables = ()
self.extra_order_by = ()
# A tuple that is a set of model field names and either True, if these
# are the fields to defer, or False if these are the only fields to
# load.
self.deferred_loading = (set(), True)
def __str__(self):
"""
Returns the query as a string of SQL with the parameter values
substituted in.
Parameter values won't necessarily be quoted correctly, since that is
done by the database interface at execution time.
"""
sql, params = self.get_compiler(DEFAULT_DB_ALIAS).as_sql()
return sql % params
def __deepcopy__(self, memo):
result = self.clone(memo=memo)
memo[id(self)] = result
return result
def __getstate__(self):
"""
Pickling support.
"""
obj_dict = self.__dict__.copy()
obj_dict['related_select_fields'] = []
obj_dict['related_select_cols'] = []
# Fields can't be pickled, so if a field list has been
# specified, we pickle the list of field names instead.
# None is also a possible value; that can pass as-is
obj_dict['select_fields'] = [
f is not None and f.name or None
for f in obj_dict['select_fields']
]
return obj_dict
def __setstate__(self, obj_dict):
"""
Unpickling support.
"""
# Rebuild list of field instances
opts = obj_dict['model']._meta
obj_dict['select_fields'] = [
name is not None and opts.get_field(name) or None
for name in obj_dict['select_fields']
]
self.__dict__.update(obj_dict)
def prepare(self):
return self
def get_compiler(self, using=None, connection=None):
if using is None and connection is None:
raise ValueError("Need either using or connection")
if using:
connection = connections[using]
# Check that the compiler will be able to execute the query
for alias, aggregate in self.aggregate_select.items():
connection.ops.check_aggregate_support(aggregate)
return connection.ops.compiler(self.compiler)(self, connection, using)
def get_meta(self):
"""
Returns the Options instance (the model._meta) from which to start
processing. Normally, this is self.model._meta, but it can be changed
by subclasses.
"""
return self.model._meta
def clone(self, klass=None, memo=None, **kwargs):
"""
Creates a copy of the current instance. The 'kwargs' parameter can be
used by clients to update attributes after copying has taken place.
"""
obj = Empty()
obj.__class__ = klass or self.__class__
obj.model = self.model
obj.alias_refcount = self.alias_refcount.copy()
obj.alias_map = self.alias_map.copy()
obj.table_map = self.table_map.copy()
obj.join_map = self.join_map.copy()
obj.rev_join_map = self.rev_join_map.copy()
obj.quote_cache = {}
obj.default_cols = self.default_cols
obj.default_ordering = self.default_ordering
obj.standard_ordering = self.standard_ordering
obj.included_inherited_models = self.included_inherited_models.copy()
obj.ordering_aliases = []
obj.select_fields = self.select_fields[:]
obj.related_select_fields = self.related_select_fields[:]
obj.dupe_avoidance = self.dupe_avoidance.copy()
obj.select = self.select[:]
obj.tables = self.tables[:]
obj.where = deepcopy(self.where, memo=memo)
obj.where_class = self.where_class
if self.group_by is None:
obj.group_by = None
else:
obj.group_by = self.group_by[:]
obj.having = deepcopy(self.having, memo=memo)
obj.order_by = self.order_by[:]
obj.low_mark, obj.high_mark = self.low_mark, self.high_mark
obj.distinct = self.distinct
obj.select_related = self.select_related
obj.related_select_cols = []
obj.aggregates = deepcopy(self.aggregates, memo=memo)
if self.aggregate_select_mask is None:
obj.aggregate_select_mask = None
else:
obj.aggregate_select_mask = self.aggregate_select_mask.copy()
# _aggregate_select_cache cannot be copied, as doing so breaks the
# (necessary) state in which both aggregates and
# _aggregate_select_cache point to the same underlying objects.
# It will get re-populated in the cloned queryset the next time it's
# used.
obj._aggregate_select_cache = None
obj.max_depth = self.max_depth
obj.extra = self.extra.copy()
if self.extra_select_mask is None:
obj.extra_select_mask = None
else:
obj.extra_select_mask = self.extra_select_mask.copy()
if self._extra_select_cache is None:
obj._extra_select_cache = None
else:
obj._extra_select_cache = self._extra_select_cache.copy()
obj.extra_tables = self.extra_tables
obj.extra_order_by = self.extra_order_by
obj.deferred_loading = deepcopy(self.deferred_loading, memo=memo)
if self.filter_is_sticky and self.used_aliases:
obj.used_aliases = self.used_aliases.copy()
else:
obj.used_aliases = set()
obj.filter_is_sticky = False
obj.__dict__.update(kwargs)
if hasattr(obj, '_setup_query'):
obj._setup_query()
return obj
def convert_values(self, value, field, connection):
"""Convert the database-returned value into a type that is consistent
across database backends.
By default, this defers to the underlying backend operations, but
it can be overridden by Query classes for specific backends.
"""
return connection.ops.convert_values(value, field)
def resolve_aggregate(self, value, aggregate, connection):
"""Resolve the value of aggregates returned by the database to
consistent (and reasonable) types.
This is required because of the predisposition of certain backends
to return Decimal and long types when they are not needed.
"""
if value is None:
if aggregate.is_ordinal:
return 0
# Return None as-is
return value
elif aggregate.is_ordinal:
# Any ordinal aggregate (e.g., count) returns an int
return int(value)
elif aggregate.is_computed:
# Any computed aggregate (e.g., avg) returns a float
return float(value)
else:
# Return value depends on the type of the field being processed.
return self.convert_values(value, aggregate.field, connection)
def get_aggregation(self, using):
"""
Returns the dictionary with the values of the existing aggregations.
"""
if not self.aggregate_select:
return {}
# If there is a group by clause, aggregating does not add useful
# information but retrieves only the first row. Aggregate
# over the subquery instead.
if self.group_by is not None:
from django.db.models.sql.subqueries import AggregateQuery
query = AggregateQuery(self.model)
obj = self.clone()
# Remove any aggregates marked for reduction from the subquery
# and move them to the outer AggregateQuery.
for alias, aggregate in self.aggregate_select.items():
if aggregate.is_summary:
query.aggregate_select[alias] = aggregate
del obj.aggregate_select[alias]
try:
query.add_subquery(obj, using)
except EmptyResultSet:
return dict(
(alias, None)
for alias in query.aggregate_select
)
else:
query = self
self.select = []
self.default_cols = False
self.extra = {}
self.remove_inherited_models()
query.clear_ordering(True)
query.clear_limits()
query.select_related = False
query.related_select_cols = []
query.related_select_fields = []
result = query.get_compiler(using).execute_sql(SINGLE)
if result is None:
result = [None for q in query.aggregate_select.items()]
return dict([
(alias, self.resolve_aggregate(val, aggregate, connection=connections[using]))
for (alias, aggregate), val
in zip(query.aggregate_select.items(), result)
])
def get_count(self, using):
"""
Performs a COUNT() query using the current filter constraints.
"""
obj = self.clone()
if len(self.select) > 1 or self.aggregate_select:
# If a select clause exists, then the query has already started to
# specify the columns that are to be returned.
# In this case, we need to use a subquery to evaluate the count.
from django.db.models.sql.subqueries import AggregateQuery
subquery = obj
subquery.clear_ordering(True)
subquery.clear_limits()
obj = AggregateQuery(obj.model)
try:
obj.add_subquery(subquery, using=using)
except EmptyResultSet:
# add_subquery evaluates the query; if it raises EmptyResultSet
# then there can be no results, and therefore the
# count is obviously 0
return 0
obj.add_count_column()
number = obj.get_aggregation(using=using)[None]
# Apply offset and limit constraints manually, since using LIMIT/OFFSET
# in SQL (in variants that provide them) doesn't change the COUNT
# output.
number = max(0, number - self.low_mark)
if self.high_mark is not None:
number = min(number, self.high_mark - self.low_mark)
return number
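# Worked example of the clamping above (illustrative, not in the original
# source): with low_mark=5, high_mark=8 and a raw COUNT() result of 20,
#   number = max(0, 20 - 5)   -> 15
#   number = min(15, 8 - 5)   -> 3
# so a sliced queryset qs[5:8] reports at most 3 rows even though the
# unsliced query matches 20.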
def has_results(self, using):
q = self.clone()
q.add_extra({'a': 1}, None, None, None, None, None)
q.select = []
q.select_fields = []
q.default_cols = False
q.select_related = False
q.set_extra_mask(('a',))
q.set_aggregate_mask(())
q.clear_ordering(True)
q.set_limits(high=1)
compiler = q.get_compiler(using=using)
return bool(compiler.execute_sql(SINGLE))
def combine(self, rhs, connector):
"""
Merge the 'rhs' query into the current one (with any 'rhs' effects
being applied *after* (that is, "to the right of") anything in the
current query. 'rhs' is not modified during a call to this function.
The 'connector' parameter describes how to connect filters from the
'rhs' query.
"""
assert self.model == rhs.model, \
"Cannot combine queries on two different base models."
assert self.can_filter(), \
"Cannot combine queries once a slice has been taken."
assert self.distinct == rhs.distinct, \
"Cannot combine a unique query with a non-unique query."
self.remove_inherited_models()
# Work out how to relabel the rhs aliases, if necessary.
change_map = {}
used = set()
conjunction = (connector == AND)
first = True
for alias in rhs.tables:
if not rhs.alias_refcount[alias]:
# An unused alias.
continue
promote = (rhs.alias_map[alias][JOIN_TYPE] == self.LOUTER)
new_alias = self.join(rhs.rev_join_map[alias],
(conjunction and not first), used, promote, not conjunction)
used.add(new_alias)
change_map[alias] = new_alias
first = False
# So that we don't exclude valid results in an "or" query combination,
# the first join that is exclusive to the lhs (self) must be converted
# to an outer join.
if not conjunction:
for alias in self.tables[1:]:
if self.alias_refcount[alias] == 1:
self.promote_alias(alias, True)
break
# Now relabel a copy of the rhs where-clause and add it to the current
# one.
if rhs.where:
w = deepcopy(rhs.where)
w.relabel_aliases(change_map)
if not self.where:
# Since 'self' matches everything, add an explicit "include
# everything" where-constraint so that connections between the
# where clauses won't exclude valid results.
self.where.add(EverythingNode(), AND)
elif self.where:
# rhs has an empty where clause.
w = self.where_class()
w.add(EverythingNode(), AND)
else:
w = self.where_class()
self.where.add(w, connector)
# Selection columns and extra extensions are those provided by 'rhs'.
self.select = []
for col in rhs.select:
if isinstance(col, (list, tuple)):
self.select.append((change_map.get(col[0], col[0]), col[1]))
else:
item = deepcopy(col)
item.relabel_aliases(change_map)
self.select.append(item)
self.select_fields = rhs.select_fields[:]
if connector == OR:
# It would be nice to be able to handle this, but the queries don't
# really make sense (or return consistent value sets). Not worth
# the extra complexity when you can write a real query instead.
if self.extra and rhs.extra:
raise ValueError("When merging querysets using 'or', you "
"cannot have extra(select=...) on both sides.")
self.extra.update(rhs.extra)
extra_select_mask = set()
if self.extra_select_mask is not None:
extra_select_mask.update(self.extra_select_mask)
if rhs.extra_select_mask is not None:
extra_select_mask.update(rhs.extra_select_mask)
if extra_select_mask:
self.set_extra_mask(extra_select_mask)
self.extra_tables += rhs.extra_tables
# Ordering uses the 'rhs' ordering, unless it has none, in which case
# the current ordering is used.
self.order_by = rhs.order_by and rhs.order_by[:] or self.order_by
self.extra_order_by = rhs.extra_order_by or self.extra_order_by
def deferred_to_data(self, target, callback):
"""
Converts the self.deferred_loading data structure to an alternate data
structure, describing the fields that *will* be loaded. This is used to
compute the columns to select from the database and also by the
QuerySet class to work out which fields are being initialised on each
model. Models that have all their fields included aren't mentioned in
the result, only those that have field restrictions in place.
The "target" parameter is the instance that is populated (in place).
The "callback" is a function that is called whenever a (model, field)
pair needs to be added to "target". It accepts three parameters:
"target", and the model and list of fields being added for that model.
"""
field_names, defer = self.deferred_loading
if not field_names:
return
columns = set()
orig_opts = self.model._meta
seen = {}
must_include = {self.model: set([orig_opts.pk])}
for field_name in field_names:
parts = field_name.split(LOOKUP_SEP)
cur_model = self.model
opts = orig_opts
for name in parts[:-1]:
old_model = cur_model
source = opts.get_field_by_name(name)[0]
cur_model = opts.get_field_by_name(name)[0].rel.to
opts = cur_model._meta
# Even if we're "just passing through" this model, we must add
# both the current model's pk and the related reference field
# to the things we select.
must_include[old_model].add(source)
add_to_dict(must_include, cur_model, opts.pk)
field, model, _, _ = opts.get_field_by_name(parts[-1])
if model is None:
model = cur_model
add_to_dict(seen, model, field)
if defer:
# We need to load all fields for each model, except those that
# appear in "seen" (for all models that appear in "seen"). The only
# slight complexity here is handling fields that exist on parent
# models.
workset = {}
for model, values in seen.iteritems():
for field, m in model._meta.get_fields_with_model():
if field in values:
continue
add_to_dict(workset, m or model, field)
for model, values in must_include.iteritems():
# If we haven't included a model in workset, we don't add the
# corresponding must_include fields for that model, since an
# empty set means "include all fields". That's why there's no
# "else" branch here.
if model in workset:
workset[model].update(values)
for model, values in workset.iteritems():
callback(target, model, values)
else:
for model, values in must_include.iteritems():
if model in seen:
seen[model].update(values)
else:
# As we've passed through this model, but not explicitly
# included any fields, we have to make sure it's mentioned
# so that only the "must include" fields are pulled in.
seen[model] = values
# Now ensure that every model in the inheritance chain is mentioned
# in the parent list. Again, it must be mentioned to ensure that
# only "must include" fields are pulled in.
for model in orig_opts.get_parent_list():
if model not in seen:
seen[model] = set()
for model, values in seen.iteritems():
callback(target, model, values)
def deferred_to_columns_cb(self, target, model, fields):
"""
Callback used by deferred_to_columns(). The "target" parameter should
be a set instance.
"""
table = model._meta.db_table
if table not in target:
target[table] = set()
for field in fields:
target[table].add(field.column)
def table_alias(self, table_name, create=False):
"""
Returns a table alias for the given table_name and whether this is a
new alias or not.
If 'create' is true, a new alias is always created. Otherwise, the
most recently created alias for the table (if one exists) is reused.
"""
current = self.table_map.get(table_name)
if not create and current:
alias = current[0]
self.alias_refcount[alias] += 1
return alias, False
# Create a new alias for this table.
if current:
alias = '%s%d' % (self.alias_prefix, len(self.alias_map) + 1)
current.append(alias)
else:
# The first occurrence of a table uses the table name directly.
alias = table_name
self.table_map[alias] = [alias]
self.alias_refcount[alias] = 1
self.tables.append(alias)
return alias, True
def ref_alias(self, alias):
""" Increases the reference count for this alias. """
self.alias_refcount[alias] += 1
def unref_alias(self, alias):
""" Decreases the reference count for this alias. """
self.alias_refcount[alias] -= 1
def promote_alias(self, alias, unconditional=False):
"""
Promotes the join type of an alias to an outer join if it's possible
for the join to contain NULL values on the left. If 'unconditional' is
False, the join is only promoted if it is nullable, otherwise it is
always promoted.
Returns True if the join was promoted.
"""
if ((unconditional or self.alias_map[alias][NULLABLE]) and
self.alias_map[alias][JOIN_TYPE] != self.LOUTER):
data = list(self.alias_map[alias])
data[JOIN_TYPE] = self.LOUTER
self.alias_map[alias] = tuple(data)
return True
return False
def promote_alias_chain(self, chain, must_promote=False):
"""
Walks along a chain of aliases, promoting the first nullable join and
any joins following that. If 'must_promote' is True, all the aliases in
the chain are promoted.
"""
for alias in chain:
if self.promote_alias(alias, must_promote):
must_promote = True
def promote_unused_aliases(self, initial_refcounts, used_aliases):
"""
Given a "before" copy of the alias_refcounts dictionary (as
'initial_refcounts') and a collection of aliases that may have been
changed or created, works out which aliases have been created since
then and which ones haven't been used and promotes all of those
aliases, plus any children of theirs in the alias tree, to outer joins.
"""
# FIXME: There's some (a lot of!) overlap with the similar OR promotion
# in add_filter(). It's not quite identical, but is very similar. So
# pulling out the common bits is something for later.
considered = {}
for alias in self.tables:
if alias not in used_aliases:
continue
if (alias not in initial_refcounts or
self.alias_refcount[alias] == initial_refcounts[alias]):
parent = self.alias_map[alias][LHS_ALIAS]
must_promote = considered.get(parent, False)
promoted = self.promote_alias(alias, must_promote)
considered[alias] = must_promote or promoted
def change_aliases(self, change_map):
"""
Changes the aliases in change_map (which maps old-alias -> new-alias),
relabelling any references to them in select columns and the where
clause.
"""
assert set(change_map.keys()).intersection(set(change_map.values())) == set()
# 1. Update references in "select" (normal columns plus aliases),
# "group by", "where" and "having".
self.where.relabel_aliases(change_map)
self.having.relabel_aliases(change_map)
for columns in [self.select, self.group_by or []]:
for pos, col in enumerate(columns):
if isinstance(col, (list, tuple)):
old_alias = col[0]
columns[pos] = (change_map.get(old_alias, old_alias), col[1])
else:
col.relabel_aliases(change_map)
for mapping in [self.aggregates]:
for key, col in mapping.items():
if isinstance(col, (list, tuple)):
old_alias = col[0]
mapping[key] = (change_map.get(old_alias, old_alias), col[1])
else:
col.relabel_aliases(change_map)
# 2. Rename the alias in the internal table/alias datastructures.
for old_alias, new_alias in change_map.iteritems():
alias_data = list(self.alias_map[old_alias])
alias_data[RHS_ALIAS] = new_alias
t = self.rev_join_map[old_alias]
data = list(self.join_map[t])
data[data.index(old_alias)] = new_alias
self.join_map[t] = tuple(data)
self.rev_join_map[new_alias] = t
del self.rev_join_map[old_alias]
self.alias_refcount[new_alias] = self.alias_refcount[old_alias]
del self.alias_refcount[old_alias]
self.alias_map[new_alias] = tuple(alias_data)
del self.alias_map[old_alias]
table_aliases = self.table_map[alias_data[TABLE_NAME]]
for pos, alias in enumerate(table_aliases):
if alias == old_alias:
table_aliases[pos] = new_alias
break
for pos, alias in enumerate(self.tables):
if alias == old_alias:
self.tables[pos] = new_alias
break
for key, alias in self.included_inherited_models.items():
if alias in change_map:
self.included_inherited_models[key] = change_map[alias]
# 3. Update any joins that refer to the old alias.
for alias, data in self.alias_map.iteritems():
lhs = data[LHS_ALIAS]
if lhs in change_map:
data = list(data)
data[LHS_ALIAS] = change_map[lhs]
self.alias_map[alias] = tuple(data)
def bump_prefix(self, exceptions=()):
"""
Changes the alias prefix to the next letter in the alphabet and
relabels all the aliases. Even tables that previously had no alias will
get an alias after this call (it's mostly used for nested queries and
the outer query will already be using the non-aliased table name).
Subclasses who create their own prefix should override this method to
produce a similar result (a new prefix and relabelled aliases).
The 'exceptions' parameter is a container that holds alias names which
should not be changed.
"""
current = ord(self.alias_prefix)
assert current < ord('Z')
prefix = chr(current + 1)
self.alias_prefix = prefix
change_map = {}
for pos, alias in enumerate(self.tables):
if alias in exceptions:
continue
new_alias = '%s%d' % (prefix, pos)
change_map[alias] = new_alias
self.tables[pos] = new_alias
self.change_aliases(change_map)
def get_initial_alias(self):
"""
Returns the first alias for this query, after increasing its reference
count.
"""
if self.tables:
alias = self.tables[0]
self.ref_alias(alias)
else:
alias = self.join((None, self.model._meta.db_table, None, None))
return alias
def count_active_tables(self):
"""
Returns the number of tables in this query with a non-zero reference
count.
"""
return len([1 for count in self.alias_refcount.itervalues() if count])
def join(self, connection, always_create=False, exclusions=(),
promote=False, outer_if_first=False, nullable=False, reuse=None):
"""
Returns an alias for the join in 'connection', either reusing an
existing alias for that join or creating a new one. 'connection' is a
tuple (lhs, table, lhs_col, col) where 'lhs' is either an existing
table alias or a table name. The join corresponds to the SQL equivalent
of::
lhs.lhs_col = table.col
If 'always_create' is True and 'reuse' is None, a new alias is always
created, regardless of whether one already exists or not. If
'always_create' is True and 'reuse' is a set, an alias in 'reuse' that
matches the connection will be returned, if possible. If
'always_create' is False, the first existing alias that matches the
'connection' is returned, if any. Otherwise a new join is created.
If 'exclusions' is specified, it is something satisfying the container
protocol ("foo in exclusions" must work) and specifies a list of
aliases that should not be returned, even if they satisfy the join.
If 'promote' is True, the join type for the alias will be LOUTER (if
the alias previously existed, the join type will be promoted from INNER
to LOUTER, if necessary).
If 'outer_if_first' is True and a new join is created, it will have the
LOUTER join type. This is used when joining certain types of querysets
and Q-objects together.
If 'nullable' is True, the join can potentially involve NULL values and
is a candidate for promotion (to "left outer") when combining querysets.
"""
lhs, table, lhs_col, col = connection
if lhs in self.alias_map:
lhs_table = self.alias_map[lhs][TABLE_NAME]
else:
lhs_table = lhs
if reuse and always_create and table in self.table_map:
# Convert the 'reuse' case to be "exclude everything but the
# reusable set, minus exclusions, for this table".
exclusions = set(self.table_map[table]).difference(reuse).union(set(exclusions))
always_create = False
t_ident = (lhs_table, table, lhs_col, col)
if not always_create:
for alias in self.join_map.get(t_ident, ()):
if alias not in exclusions:
if lhs_table and not self.alias_refcount[self.alias_map[alias][LHS_ALIAS]]:
# The LHS of this join tuple is no longer part of the
# query, so skip this possibility.
continue
if self.alias_map[alias][LHS_ALIAS] != lhs:
continue
self.ref_alias(alias)
if promote:
self.promote_alias(alias)
return alias
# No reuse is possible, so we need a new alias.
alias, _ = self.table_alias(table, True)
if not lhs:
# Not all tables need to be joined to anything. No join type
# means the later columns are ignored.
join_type = None
elif promote or outer_if_first:
join_type = self.LOUTER
else:
join_type = self.INNER
join = (table, alias, join_type, lhs, lhs_col, col, nullable)
self.alias_map[alias] = join
if t_ident in self.join_map:
self.join_map[t_ident] += (alias,)
else:
self.join_map[t_ident] = (alias,)
self.rev_join_map[alias] = t_ident
return alias
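# Illustrative note (not in the original source), using hypothetical table
# names: the base table is added with a connection of
#   (None, 'myapp_author', None, None)        # no join type; see get_initial_alias()
# while a relation join such as
#   ('myapp_author', 'myapp_book', 'id', 'author_id')
# yields an alias whose alias_map entry is the 7-tuple built above:
#   (table, alias, join_type, lhs, lhs_col, col, nullable)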
def setup_inherited_models(self):
"""
If the model that is the basis for this QuerySet inherits other models,
we need to ensure that those other models have their tables included in
the query.
We do this as a separate step so that subclasses know which
tables are going to be active in the query, without needing to compute
all the select columns (this method is called from pre_sql_setup(),
whereas column determination is a later part, and side-effect, of
as_sql()).
"""
opts = self.model._meta
root_alias = self.tables[0]
seen = {None: root_alias}
# Skip all proxy to the root proxied model
proxied_model = get_proxied_model(opts)
for field, model in opts.get_fields_with_model():
if model not in seen:
if model is proxied_model:
seen[model] = root_alias
else:
link_field = opts.get_ancestor_link(model)
seen[model] = self.join((root_alias, model._meta.db_table,
link_field.column, model._meta.pk.column))
self.included_inherited_models = seen
def remove_inherited_models(self):
"""
Undoes the effects of setup_inherited_models(). Should be called
whenever select columns (self.select) are set explicitly.
"""
for key, alias in self.included_inherited_models.items():
if key:
self.unref_alias(alias)
self.included_inherited_models = {}
def add_aggregate(self, aggregate, model, alias, is_summary):
"""
Adds a single aggregate expression to the Query
"""
opts = model._meta
field_list = aggregate.lookup.split(LOOKUP_SEP)
if len(field_list) == 1 and aggregate.lookup in self.aggregates:
# Aggregate is over an annotation
field_name = field_list[0]
col = field_name
source = self.aggregates[field_name]
if not is_summary:
raise FieldError("Cannot compute %s('%s'): '%s' is an aggregate" % (
aggregate.name, field_name, field_name))
elif ((len(field_list) > 1) or
(field_list[0] not in [i.name for i in opts.fields]) or
self.group_by is None or
not is_summary):
# If:
# - the field descriptor has more than one part (foo__bar), or
# - the field descriptor is referencing an m2m/m2o field, or
# - this is a reference to a model field (possibly inherited), or
# - this is an annotation over a model field
# then we need to explore the joins that are required.
field, source, opts, join_list, last, _ = self.setup_joins(
field_list, opts, self.get_initial_alias(), False)
# Process the join chain to see if it can be trimmed
col, _, join_list = self.trim_joins(source, join_list, last, False)
# If the aggregate references a model or field that requires a join,
# those joins must be LEFT OUTER - empty join rows must be returned
# in order for zeros to be returned for those aggregates.
for column_alias in join_list:
self.promote_alias(column_alias, unconditional=True)
col = (join_list[-1], col)
else:
# The simplest cases. No joins required -
# just reference the provided column alias.
field_name = field_list[0]
source = opts.get_field(field_name)
col = field_name
# Add the aggregate to the query
aggregate.add_to_query(self, alias, col=col, source=source, is_summary=is_summary)
def add_filter(self, filter_expr, connector=AND, negate=False, trim=False,
can_reuse=None, process_extras=True):
"""
Add a single filter to the query. The 'filter_expr' is a pair:
(filter_string, value). E.g. ('name__contains', 'fred')
If 'negate' is True, this is an exclude() filter. It's important to
note that this method does not negate anything in the where-clause
object when inserting the filter constraints. This is because negated
filters often require multiple calls to add_filter() and the negation
should only happen once. So the caller is responsible for this (the
caller will normally be add_q(), for example).
If 'trim' is True, we automatically trim the final join group (used
internally when constructing nested queries).
If 'can_reuse' is a set, we are processing a component of a
multi-component filter (e.g. filter(Q1, Q2)). In this case, 'can_reuse'
will be a set of table aliases that can be reused in this filter, even
if we would otherwise force the creation of new aliases for a join
(needed for nested Q-filters). The set is updated by this method.
If 'process_extras' is set, any extra filters returned from the table
joining process will be processed. This parameter is set to False
during the processing of extra filters to avoid infinite recursion.
"""
arg, value = filter_expr
parts = arg.split(LOOKUP_SEP)
if not parts:
raise FieldError("Cannot parse keyword query %r" % arg)
# Work out the lookup type and remove it from 'parts', if necessary.
if len(parts) == 1 or parts[-1] not in self.query_terms:
lookup_type = 'exact'
else:
lookup_type = parts.pop()
# By default, this is a WHERE clause. If an aggregate is referenced
# in the value, the filter will be promoted to a HAVING
having_clause = False
# Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all
# uses of None as a query value.
if value is None:
if lookup_type != 'exact':
raise ValueError("Cannot use None as a query value")
lookup_type = 'isnull'
value = True
elif callable(value):
value = value()
elif hasattr(value, 'evaluate'):
# If value is a query expression, evaluate it
value = SQLEvaluator(value, self)
having_clause = value.contains_aggregate
for alias, aggregate in self.aggregates.items():
if alias == parts[0]:
entry = self.where_class()
entry.add((aggregate, lookup_type, value), AND)
if negate:
entry.negate()
self.having.add(entry, AND)
return
opts = self.get_meta()
alias = self.get_initial_alias()
allow_many = trim or not negate
try:
field, target, opts, join_list, last, extra_filters = self.setup_joins(
parts, opts, alias, True, allow_many, can_reuse=can_reuse,
negate=negate, process_extras=process_extras)
except MultiJoin, e:
self.split_exclude(filter_expr, LOOKUP_SEP.join(parts[:e.level]),
can_reuse)
return
if (lookup_type == 'isnull' and value is True and not negate and
len(join_list) > 1):
# If the comparison is against NULL, we may need to use some left
# outer joins when creating the join chain. This is only done when
# needed, as it's less efficient at the database level.
self.promote_alias_chain(join_list)
# Process the join list to see if we can remove any inner joins from
# the far end (fewer tables in a query is better).
col, alias, join_list = self.trim_joins(target, join_list, last, trim)
if connector == OR:
# Some joins may need to be promoted when adding a new filter to a
# disjunction. We walk the list of new joins and where it diverges
# from any previous joins (ref count is 1 in the table list), we
# make the new additions (and any existing ones not used in the new
# join list) an outer join.
join_it = iter(join_list)
table_it = iter(self.tables)
join_it.next(), table_it.next()
table_promote = False
join_promote = False
for join in join_it:
table = table_it.next()
if join == table and self.alias_refcount[join] > 1:
continue
join_promote = self.promote_alias(join)
if table != join:
table_promote = self.promote_alias(table)
break
self.promote_alias_chain(join_it, join_promote)
self.promote_alias_chain(table_it, table_promote)
if having_clause:
if (alias, col) not in self.group_by:
self.group_by.append((alias, col))
self.having.add((Constraint(alias, col, field), lookup_type, value),
connector)
else:
self.where.add((Constraint(alias, col, field), lookup_type, value),
connector)
if negate:
self.promote_alias_chain(join_list)
if lookup_type != 'isnull':
if len(join_list) > 1:
for alias in join_list:
if self.alias_map[alias][JOIN_TYPE] == self.LOUTER:
j_col = self.alias_map[alias][RHS_JOIN_COL]
entry = self.where_class()
entry.add(
(Constraint(alias, j_col, None), 'isnull', True),
AND
)
entry.negate()
self.where.add(entry, AND)
break
if not (lookup_type == 'in'
and not hasattr(value, 'as_sql')
and not hasattr(value, '_as_sql')
and not value) and field.null:
# Leaky abstraction artifact: We have to specifically
# exclude the "foo__in=[]" case from this handling, because
# it's short-circuited in the Where class.
# We also need to handle the case where a subquery is provided
self.where.add((Constraint(alias, col, None), 'isnull', False), AND)
if can_reuse is not None:
can_reuse.update(join_list)
if process_extras:
for filter in extra_filters:
self.add_filter(filter, negate=negate, can_reuse=can_reuse,
process_extras=False)
def add_q(self, q_object, used_aliases=None):
"""
Adds a Q-object to the current filter.
Can also be used to add anything that has an 'add_to_query()' method.
"""
if used_aliases is None:
used_aliases = self.used_aliases
if hasattr(q_object, 'add_to_query'):
# Complex custom objects are responsible for adding themselves.
q_object.add_to_query(self, used_aliases)
else:
if self.where and q_object.connector != AND and len(q_object) > 1:
self.where.start_subtree(AND)
subtree = True
else:
subtree = False
connector = AND
for child in q_object.children:
if connector == OR:
refcounts_before = self.alias_refcount.copy()
self.where.start_subtree(connector)
if isinstance(child, Node):
self.add_q(child, used_aliases)
else:
self.add_filter(child, connector, q_object.negated,
can_reuse=used_aliases)
self.where.end_subtree()
if connector == OR:
# Aliases that were newly added or not used at all need to
# be promoted to outer joins if they are nullable relations.
# (they shouldn't turn the whole conditional into the empty
# set just because they don't match anything).
self.promote_unused_aliases(refcounts_before, used_aliases)
connector = q_object.connector
if q_object.negated:
self.where.negate()
if subtree:
self.where.end_subtree()
if self.filter_is_sticky:
self.used_aliases = used_aliases
def setup_joins(self, names, opts, alias, dupe_multis, allow_many=True,
allow_explicit_fk=False, can_reuse=None, negate=False,
process_extras=True):
"""
Compute the necessary table joins for the passage through the fields
given in 'names'. 'opts' is the Options class for the current model
(which gives the table we are joining to), 'alias' is the alias for the
table we are joining to. If dupe_multis is True, any many-to-many or
many-to-one joins will always create a new alias (necessary for
disjunctive filters). If can_reuse is not None, it's a list of aliases
that can be reused in these joins (nothing else can be reused in this
case). Finally, 'negate' is used in the same sense as for add_filter()
-- it indicates an exclude() filter, or something similar. It is only
passed in here so that it can be passed to a field's extra_filter() for
customised behaviour.
Returns the final field involved in the join, the target database
column (used for any 'where' constraint), the final 'opts' value and the
list of tables joined.
"""
joins = [alias]
last = [0]
dupe_set = set()
exclusions = set()
extra_filters = []
for pos, name in enumerate(names):
try:
exclusions.add(int_alias)
except NameError:
pass
exclusions.add(alias)
last.append(len(joins))
if name == 'pk':
name = opts.pk.name
try:
field, model, direct, m2m = opts.get_field_by_name(name)
except FieldDoesNotExist:
for f in opts.fields:
if allow_explicit_fk and name == f.attname:
# XXX: A hack to allow foo_id to work in values() for
# backwards compatibility purposes. If we dropped that
# feature, this could be removed.
field, model, direct, m2m = opts.get_field_by_name(f.name)
break
else:
names = opts.get_all_field_names() + self.aggregate_select.keys()
raise FieldError("Cannot resolve keyword %r into field. "
"Choices are: %s" % (name, ", ".join(names)))
if not allow_many and (m2m or not direct):
for alias in joins:
self.unref_alias(alias)
raise MultiJoin(pos + 1)
if model:
# The field lives on a base class of the current model.
# Skip the chain of proxy to the concrete proxied model
proxied_model = get_proxied_model(opts)
for int_model in opts.get_base_chain(model):
if int_model is proxied_model:
opts = int_model._meta
else:
lhs_col = opts.parents[int_model].column
dedupe = lhs_col in opts.duplicate_targets
if dedupe:
exclusions.update(self.dupe_avoidance.get(
(id(opts), lhs_col), ()))
dupe_set.add((opts, lhs_col))
opts = int_model._meta
alias = self.join((alias, opts.db_table, lhs_col,
opts.pk.column), exclusions=exclusions)
joins.append(alias)
exclusions.add(alias)
for (dupe_opts, dupe_col) in dupe_set:
self.update_dupe_avoidance(dupe_opts, dupe_col,
alias)
cached_data = opts._join_cache.get(name)
orig_opts = opts
dupe_col = direct and field.column or field.field.column
dedupe = dupe_col in opts.duplicate_targets
if dupe_set or dedupe:
if dedupe:
dupe_set.add((opts, dupe_col))
exclusions.update(self.dupe_avoidance.get((id(opts), dupe_col),
()))
if process_extras and hasattr(field, 'extra_filters'):
extra_filters.extend(field.extra_filters(names, pos, negate))
if direct:
if m2m:
# Many-to-many field defined on the current model.
if cached_data:
(table1, from_col1, to_col1, table2, from_col2,
to_col2, opts, target) = cached_data
else:
table1 = field.m2m_db_table()
from_col1 = opts.pk.column
to_col1 = field.m2m_column_name()
opts = field.rel.to._meta
table2 = opts.db_table
from_col2 = field.m2m_reverse_name()
to_col2 = opts.pk.column
target = opts.pk
orig_opts._join_cache[name] = (table1, from_col1,
to_col1, table2, from_col2, to_col2, opts,
target)
int_alias = self.join((alias, table1, from_col1, to_col1),
dupe_multis, exclusions, nullable=True,
reuse=can_reuse)
if int_alias == table2 and from_col2 == to_col2:
joins.append(int_alias)
alias = int_alias
else:
alias = self.join(
(int_alias, table2, from_col2, to_col2),
dupe_multis, exclusions, nullable=True,
reuse=can_reuse)
joins.extend([int_alias, alias])
elif field.rel:
# One-to-one or many-to-one field
if cached_data:
(table, from_col, to_col, opts, target) = cached_data
else:
opts = field.rel.to._meta
target = field.rel.get_related_field()
table = opts.db_table
from_col = field.column
to_col = target.column
orig_opts._join_cache[name] = (table, from_col, to_col,
opts, target)
alias = self.join((alias, table, from_col, to_col),
exclusions=exclusions, nullable=field.null)
joins.append(alias)
else:
# Non-relation fields.
target = field
break
else:
orig_field = field
field = field.field
if m2m:
# Many-to-many field defined on the target model.
if cached_data:
(table1, from_col1, to_col1, table2, from_col2,
to_col2, opts, target) = cached_data
else:
table1 = field.m2m_db_table()
from_col1 = opts.pk.column
to_col1 = field.m2m_reverse_name()
opts = orig_field.opts
table2 = opts.db_table
from_col2 = field.m2m_column_name()
to_col2 = opts.pk.column
target = opts.pk
orig_opts._join_cache[name] = (table1, from_col1,
to_col1, table2, from_col2, to_col2, opts,
target)
int_alias = self.join((alias, table1, from_col1, to_col1),
dupe_multis, exclusions, nullable=True,
reuse=can_reuse)
alias = self.join((int_alias, table2, from_col2, to_col2),
dupe_multis, exclusions, nullable=True,
reuse=can_reuse)
joins.extend([int_alias, alias])
else:
# One-to-many field (ForeignKey defined on the target model)
if cached_data:
(table, from_col, to_col, opts, target) = cached_data
else:
local_field = opts.get_field_by_name(
field.rel.field_name)[0]
opts = orig_field.opts
table = opts.db_table
from_col = local_field.column
to_col = field.column
target = opts.pk
orig_opts._join_cache[name] = (table, from_col, to_col,
opts, target)
alias = self.join((alias, table, from_col, to_col),
dupe_multis, exclusions, nullable=True,
reuse=can_reuse)
joins.append(alias)
for (dupe_opts, dupe_col) in dupe_set:
try:
self.update_dupe_avoidance(dupe_opts, dupe_col, int_alias)
except NameError:
self.update_dupe_avoidance(dupe_opts, dupe_col, alias)
if pos != len(names) - 1:
if pos == len(names) - 2:
raise FieldError("Join on field %r not permitted. Did you misspell %r for the lookup type?" % (name, names[pos + 1]))
else:
raise FieldError("Join on field %r not permitted." % name)
return field, target, opts, joins, last, extra_filters
def trim_joins(self, target, join_list, last, trim):
"""
Sometimes joins at the end of a multi-table sequence can be trimmed. If
the final join is against the same column as we are comparing against,
and is an inner join, we can go back one step in a join chain and
compare against the LHS of the join instead (and then repeat the
optimization). The result, potentially, involves fewer table joins.
The 'target' parameter is the final field being joined to, 'join_list'
is the full list of join aliases.
The 'last' list contains offsets into 'join_list', corresponding to
each component of the filter. Many-to-many relations, for example, add
two tables to the join list and we want to deal with both tables the
same way, so 'last' has an entry for the first of the two tables and
then the table immediately after the second table, in that case.
The 'trim' parameter forces the final piece of the join list to be
trimmed before anything. See the documentation of add_filter() for
details about this.
Returns the final active column and table alias and the new active
join_list.
"""
final = len(join_list)
penultimate = last.pop()
if penultimate == final:
penultimate = last.pop()
if trim and len(join_list) > 1:
extra = join_list[penultimate:]
join_list = join_list[:penultimate]
final = penultimate
penultimate = last.pop()
col = self.alias_map[extra[0]][LHS_JOIN_COL]
for alias in extra:
self.unref_alias(alias)
else:
col = target.column
alias = join_list[-1]
while final > 1:
join = self.alias_map[alias]
if col != join[RHS_JOIN_COL] or join[JOIN_TYPE] != self.INNER:
break
self.unref_alias(alias)
alias = join[LHS_ALIAS]
col = join[LHS_JOIN_COL]
join_list = join_list[:-1]
final -= 1
if final == penultimate:
penultimate = last.pop()
return col, alias, join_list
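# Illustrative sketch (not in the original source), with hypothetical models:
# for Book.objects.filter(author__id=3) the chain ends in an INNER join on
# book.author_id = author.id, and the compared column equals the join column,
# so the final join is unreferenced and the WHERE clause compares
# book.author_id directly -- one table fewer in the query.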
def update_dupe_avoidance(self, opts, col, alias):
"""
For a column that is one of multiple pointing to the same table, update
the internal data structures to note that this alias shouldn't be used
for those other columns.
"""
ident = id(opts)
for name in opts.duplicate_targets[col]:
try:
self.dupe_avoidance[ident, name].add(alias)
except KeyError:
self.dupe_avoidance[ident, name] = set([alias])
def split_exclude(self, filter_expr, prefix, can_reuse):
"""
When doing an exclude against any kind of N-to-many relation, we need
to use a subquery. This method constructs the nested query, given the
original exclude filter (filter_expr) and the portion up to the first
N-to-many relation field.
"""
query = Query(self.model)
query.add_filter(filter_expr, can_reuse=can_reuse)
query.bump_prefix()
query.clear_ordering(True)
query.set_start(prefix)
self.add_filter(('%s__in' % prefix, query), negate=True, trim=True,
can_reuse=can_reuse)
# If there's more than one join in the inner query (before any initial
# bits were trimmed -- which means the last active table is more than
# two places into the alias list), we need to also handle the
# possibility that the earlier joins don't match anything by adding a
# comparison to NULL (e.g. in
# Tag.objects.exclude(parent__parent__name='t1'), a tag with no parent
# would otherwise be overlooked).
active_positions = [pos for (pos, count) in
enumerate(query.alias_refcount.itervalues()) if count]
if active_positions[-1] > 1:
self.add_filter(('%s__isnull' % prefix, False), negate=True,
trim=True, can_reuse=can_reuse)
def set_limits(self, low=None, high=None):
"""
Adjusts the limits on the rows retrieved. We use low/high to set these,
as it makes it more Pythonic to read and write. When the SQL query is
created, they are converted to the appropriate offset and limit values.
Any limits passed in here are applied relative to the existing
constraints. So low is added to the current low value and both will be
clamped to any existing high value.
"""
if high is not None:
if self.high_mark is not None:
self.high_mark = min(self.high_mark, self.low_mark + high)
else:
self.high_mark = self.low_mark + high
if low is not None:
if self.high_mark is not None:
self.low_mark = min(self.high_mark, self.low_mark + low)
else:
self.low_mark = self.low_mark + low
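# Worked example (illustrative, not in the original source), starting from
# low_mark=0, high_mark=None:
#   set_limits(low=5, high=10)   # -> low_mark=5, high_mark=10
#   set_limits(high=3)           # -> high_mark=min(10, 5 + 3) = 8
# which mirrors slicing a queryset as qs[5:10][:3].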
def clear_limits(self):
"""
Clears any existing limits.
"""
self.low_mark, self.high_mark = 0, None
def can_filter(self):
"""
Returns True if adding filters to this instance is still possible.
Typically, this means no limits or offsets have been put on the results.
"""
return not self.low_mark and self.high_mark is None
def clear_select_fields(self):
"""
Clears the list of fields to select (but not extra_select columns).
Some queryset types completely replace any existing list of select
columns.
"""
self.select = []
self.select_fields = []
def add_fields(self, field_names, allow_m2m=True):
"""
Adds the given (model) fields to the select set. The field names are
added in the order specified.
"""
alias = self.get_initial_alias()
opts = self.get_meta()
try:
for name in field_names:
field, target, u2, joins, u3, u4 = self.setup_joins(
name.split(LOOKUP_SEP), opts, alias, False, allow_m2m,
True)
final_alias = joins[-1]
col = target.column
if len(joins) > 1:
join = self.alias_map[final_alias]
if col == join[RHS_JOIN_COL]:
self.unref_alias(final_alias)
final_alias = join[LHS_ALIAS]
col = join[LHS_JOIN_COL]
joins = joins[:-1]
self.promote_alias_chain(joins[1:])
self.select.append((final_alias, col))
self.select_fields.append(field)
except MultiJoin:
raise FieldError("Invalid field name: '%s'" % name)
except FieldError:
names = opts.get_all_field_names() + self.extra.keys() + self.aggregate_select.keys()
names.sort()
raise FieldError("Cannot resolve keyword %r into field. "
"Choices are: %s" % (name, ", ".join(names)))
self.remove_inherited_models()
def add_ordering(self, *ordering):
"""
Adds items from the 'ordering' sequence to the query's "order by"
clause. These items are either field names (not column names) --
possibly with a direction prefix ('-' or '?') -- or ordinals,
corresponding to column positions in the 'select' list.
If 'ordering' is empty, all ordering is cleared from the query.
"""
errors = []
for item in ordering:
if not ORDER_PATTERN.match(item):
errors.append(item)
if errors:
raise FieldError('Invalid order_by arguments: %s' % errors)
if ordering:
self.order_by.extend(ordering)
else:
self.default_ordering = False
def clear_ordering(self, force_empty=False):
"""
Removes any ordering settings. If 'force_empty' is True, there will be
no ordering in the resulting query (not even the model's default).
"""
self.order_by = []
self.extra_order_by = ()
if force_empty:
self.default_ordering = False
def set_group_by(self):
"""
Expands the GROUP BY clause required by the query.
This will usually be the set of all non-aggregate fields in the
return data. If the database backend supports grouping by the
primary key, and the query would be equivalent, the optimization
will be made automatically.
"""
self.group_by = []
for sel in self.select:
self.group_by.append(sel)
def add_count_column(self):
"""
Converts the query to do count(...) or count(distinct(pk)) in order to
get its size.
"""
if not self.distinct:
if not self.select:
count = self.aggregates_module.Count('*', is_summary=True)
else:
assert len(self.select) == 1, \
"Cannot add count col with multiple cols in 'select': %r" % self.select
count = self.aggregates_module.Count(self.select[0])
else:
opts = self.model._meta
if not self.select:
count = self.aggregates_module.Count((self.join((None, opts.db_table, None, None)), opts.pk.column),
is_summary=True, distinct=True)
else:
# Because of SQL portability issues, multi-column, distinct
# counts need a sub-query -- see get_count() for details.
assert len(self.select) == 1, \
"Cannot add count col with multiple cols in 'select'."
count = self.aggregates_module.Count(self.select[0], distinct=True)
# Distinct handling is done in Count(), so don't do it at this
# level.
self.distinct = False
# Set only aggregate to be the count column.
# Clear out the select cache to reflect the new unmasked aggregates.
self.aggregates = {None: count}
self.set_aggregate_mask(None)
self.group_by = None
def add_select_related(self, fields):
"""
Sets up the select_related data structure so that we only select
certain related models (as opposed to all models, when
self.select_related=True).
"""
field_dict = {}
for field in fields:
d = field_dict
for part in field.split(LOOKUP_SEP):
d = d.setdefault(part, {})
self.select_related = field_dict
self.related_select_cols = []
self.related_select_fields = []
def add_extra(self, select, select_params, where, params, tables, order_by):
"""
Adds data to the various extra_* attributes for user-created additions
to the query.
"""
if select:
# We need to pair any placeholder markers in the 'select'
# dictionary with their parameters in 'select_params' so that
# subsequent updates to the select dictionary also adjust the
# parameters appropriately.
select_pairs = SortedDict()
if select_params:
param_iter = iter(select_params)
else:
param_iter = iter([])
for name, entry in select.items():
entry = force_unicode(entry)
entry_params = []
pos = entry.find("%s")
while pos != -1:
entry_params.append(param_iter.next())
pos = entry.find("%s", pos + 2)
select_pairs[name] = (entry, entry_params)
# This is order preserving, since self.extra_select is a SortedDict.
self.extra.update(select_pairs)
if where or params:
self.where.add(ExtraWhere(where, params), AND)
if tables:
self.extra_tables += tuple(tables)
if order_by:
self.extra_order_by = order_by
def clear_deferred_loading(self):
"""
Remove any fields from the deferred loading set.
"""
self.deferred_loading = (set(), True)
def add_deferred_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
exclude from loading from the database when automatic column selection
is done. The new field names are added to any existing field names that
are deferred (or removed from any existing field names that are marked
as the only ones for immediate loading).
"""
# Fields on related models are stored in the literal double-underscore
# format, so that we can use a set datastructure. We do the foo__bar
# splitting and handling when computing the SQL column names (as part of
# get_columns()).
existing, defer = self.deferred_loading
if defer:
# Add to existing deferred names.
self.deferred_loading = existing.union(field_names), True
else:
# Remove names from the set of any existing "immediate load" names.
self.deferred_loading = existing.difference(field_names), False
def add_immediate_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
retrieve when the SQL is executed ("immediate loading" fields). The
field names replace any existing immediate loading field names. If
there are field names already specified for deferred loading, those
names are removed from the new field_names before storing the new names
for immediate loading. (That is, immediate loading overrides any
existing immediate values, but respects existing deferrals.)
"""
existing, defer = self.deferred_loading
if defer:
# Remove any existing deferred names from the current set before
# setting the new names.
self.deferred_loading = set(field_names).difference(existing), False
else:
# Replace any existing "immediate load" field names.
self.deferred_loading = set(field_names), False
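# Illustrative walk-through (not in the original source) of the defer/only
# bookkeeping on a fresh query q, with hypothetical field names:
#   q.deferred_loading                         # (set(), True): defer nothing
#   q.add_deferred_loading(['body'])           # -> (set(['body']), True), i.e. defer()
#   q.add_immediate_loading(['title', 'body']) # -> (set(['title']), False), i.e. only();
#                                              #    the existing deferral of 'body' is respected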
def get_loaded_field_names(self):
"""
If any fields are marked to be deferred, returns a dictionary mapping
models to a set of names in those fields that will be loaded. If a
model is not in the returned dictionary, none of its fields are
deferred.
If no fields are marked for deferral, returns an empty dictionary.
"""
collection = {}
self.deferred_to_data(collection, self.get_loaded_field_names_cb)
return collection
def get_loaded_field_names_cb(self, target, model, fields):
"""
Callback used by get_deferred_field_names().
"""
target[model] = set([f.name for f in fields])
def set_aggregate_mask(self, names):
"Set the mask of aggregates that will actually be returned by the SELECT"
if names is None:
self.aggregate_select_mask = None
else:
self.aggregate_select_mask = set(names)
self._aggregate_select_cache = None
def set_extra_mask(self, names):
"""
Set the mask of extra select items that will be returned by SELECT.
We don't actually remove them from the Query since they might be used
later.
"""
if names is None:
self.extra_select_mask = None
else:
self.extra_select_mask = set(names)
self._extra_select_cache = None
def _aggregate_select(self):
"""The SortedDict of aggregate columns that are not masked, and should
be used in the SELECT clause.
This result is cached for optimization purposes.
"""
if self._aggregate_select_cache is not None:
return self._aggregate_select_cache
elif self.aggregate_select_mask is not None:
self._aggregate_select_cache = SortedDict([
(k,v) for k,v in self.aggregates.items()
if k in self.aggregate_select_mask
])
return self._aggregate_select_cache
else:
return self.aggregates
aggregate_select = property(_aggregate_select)
def _extra_select(self):
if self._extra_select_cache is not None:
return self._extra_select_cache
elif self.extra_select_mask is not None:
self._extra_select_cache = SortedDict([
(k,v) for k,v in self.extra.items()
if k in self.extra_select_mask
])
return self._extra_select_cache
else:
return self.extra
extra_select = property(_extra_select)
def set_start(self, start):
"""
Sets the table from which to start joining. The start position is
specified by the related attribute from the base model. This will
automatically set the select column to be the column linked from the
previous table.
This method is primarily for internal use and the error checking isn't
as friendly as add_filter(). Mostly useful for querying directly
against the join table of a many-to-many relation in a subquery.
"""
opts = self.model._meta
alias = self.get_initial_alias()
field, col, opts, joins, last, extra = self.setup_joins(
start.split(LOOKUP_SEP), opts, alias, False)
select_col = self.alias_map[joins[1]][LHS_JOIN_COL]
select_alias = alias
# The call to setup_joins added an extra reference to everything in
# joins. Reverse that.
for alias in joins:
self.unref_alias(alias)
# We might be able to trim some joins from the front of this query,
# providing that we only traverse "always equal" connections (i.e. rhs
# is *always* the same value as lhs).
for alias in joins[1:]:
join_info = self.alias_map[alias]
if (join_info[LHS_JOIN_COL] != select_col
or join_info[JOIN_TYPE] != self.INNER):
break
self.unref_alias(select_alias)
select_alias = join_info[RHS_ALIAS]
select_col = join_info[RHS_JOIN_COL]
self.select = [(select_alias, select_col)]
self.remove_inherited_models()
def get_order_dir(field, default='ASC'):
"""
Returns the field name and direction for an order specification. For
example, '-foo' is returned as ('foo', 'DESC').
The 'default' param is used to indicate which way no prefix (or a '+'
prefix) should sort. The '-' prefix always sorts the opposite way.
"""
dirn = ORDER_DIR[default]
if field[0] == '-':
return field[1:], dirn[1]
return field, dirn[0]
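# Illustrative examples (an addition for clarity, not part of the original
# module), assuming the usual ORDER_DIR mapping defined earlier in this file,
# i.e. 'ASC'/'DESC' each map to a (same, opposite) pair:
assert get_order_dir('created') == ('created', 'ASC')
assert get_order_dir('-created') == ('created', 'DESC')
assert get_order_dir('created', default='DESC') == ('created', 'DESC')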
def setup_join_cache(sender, **kwargs):
"""
The information needed to join between model fields is something that is
invariant over the life of the model, so we cache it in the model's Options
class, rather than recomputing it all the time.
This method initialises the (empty) cache when the model is created.
"""
sender._meta._join_cache = {}
signals.class_prepared.connect(setup_join_cache)
def add_to_dict(data, key, value):
"""
A helper function to add "value" to the set of values for "key", whether or
not "key" already exists.
"""
if key in data:
data[key].add(value)
else:
data[key] = set([value])
def get_proxied_model(opts):
int_opts = opts
proxied_model = None
while int_opts.proxy:
proxied_model = int_opts.proxy_for_model
int_opts = proxied_model._meta
return proxied_model
|
bmay98/xortool
|
refs/heads/master
|
xortool/args.py
|
1
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
import getopt
import os
import sys
from routine import *
class ArgError(Exception):
pass
PARAMETERS = {
"input_is_hex": 0,
"max_key_length": 65,
"known_key_length": None,
"most_frequent_char": None,
"brute_chars": None,
"brute_printable": None,
"frequency_spread": 0,
"filename": "-", # stdin by default
}
def show_usage_and_exit():
print """xortool.py
A tool to do some xor analysis:
- guess the key length (based on count of equal chars)
- guess the key (based on knowledge of most probable char)
Usage:
{} [-h|--help] [OPTIONS] [<filename>]
Options:
-l,--key-length length of the key (integer)
-c,--char most possible char (one char or hex code)
-m,--max-keylen=32 maximum key length to probe (integer)
-x,--hex input is hex-encoded str
-b,--brute-chars brute force all possible characters
-o,--brute-printable same as -b but will only use printable
characters for keys
""".format(os.path.basename(sys.argv[0]))
sys.exit(1)
def parse_parameters():
"""
Parse arguments and update PARAMETERS if needed
"""
options, arguments = get_options_and_arguments(sys.argv[1:])
update_parameters(options, arguments)
return PARAMETERS
def get_options_and_arguments(program_arguments):
options, arguments = [], []
try:
options, arguments = getopt.gnu_getopt(program_arguments,
"l:c:s:m:xbo",
["key-length=",
"char=",
"spread=",
"max-keylen=",
"hex",
"help",
"usage",
"brute-chars",
"brute-printable"])
except getopt.GetoptError:
show_usage_and_exit()
return options, arguments
def update_parameters(options, arguments):
global PARAMETERS
try:
for option, value in options:
if option in ("-x", "--hex"):
PARAMETERS["input_is_hex"] = 1
elif option in ("-c", "--char"):
PARAMETERS["most_frequent_char"] = parse_char(value)
elif option in ("-l", "--key-length"):
PARAMETERS["known_key_length"] = int(value)
elif option in ("-b", "--brute-chars"):
PARAMETERS["brute_chars"] = True
elif option in ("-o", "--brute-printable"):
PARAMETERS["brute_printable"] = True
elif option in ("-s", "--spread"):
PARAMETERS["frequency_spread"] = int(value)
elif option in ("-m", "--max-keylen"):
PARAMETERS["max_key_length"] = int(value)
elif option in ("-h", "--help", "--usage"):
show_usage_and_exit()
else:
raise ArgError("Unknown argument: {0}".format(option))
except ValueError as err:
raise ArgError(str(err))
if ((PARAMETERS["most_frequent_char"] and PARAMETERS["brute_printable"]
or
PARAMETERS["most_frequent_char"] and PARAMETERS["brute_chars"]
or
PARAMETERS["brute_printable"] and PARAMETERS["brute_chars"])):
raise ArgError("Only one out of -c, -b or -o should be used")
if len(arguments) == 1:
PARAMETERS["filename"] = arguments[0]
return
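if __name__ == '__main__':
    # Minimal demonstration (illustrative addition, not part of the original
    # tool): parse a fake command line. "cipher.hex" is a hypothetical
    # filename used only for this example.
    sys.argv = ['xortool.py', '-x', '-l', '8', 'cipher.hex']
    params = parse_parameters()
    assert params['input_is_hex'] == 1
    assert params['known_key_length'] == 8
    assert params['filename'] == 'cipher.hex'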
|
PlayUAV/MissionPlanner
|
refs/heads/master
|
Lib/encodings/utf_16_be.py
|
103
|
""" Python 'utf-16-be' Codec
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
encode = codecs.utf_16_be_encode
def decode(input, errors='strict'):
return codecs.utf_16_be_decode(input, errors, True)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.utf_16_be_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
_buffer_decode = codecs.utf_16_be_decode
class StreamWriter(codecs.StreamWriter):
encode = codecs.utf_16_be_encode
class StreamReader(codecs.StreamReader):
decode = codecs.utf_16_be_decode
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='utf-16-be',
encode=encode,
decode=decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
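if __name__ == "__main__":
    # Quick round-trip sanity check (illustrative addition, not part of the
    # codec module itself; assumes Python 2 byte-string semantics, as used
    # by this library).
    raw, _ = encode(u"caf\xe9")
    assert raw == "\x00c\x00a\x00f\x00\xe9"
    assert decode(raw)[0] == u"caf\xe9"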
|
BirkbeckCTP/janeway
|
refs/heads/master
|
src/core/model_utils.py
|
1
|
"""
Utilities for designing and working with models
"""
__copyright__ = "Copyright 2018 Birkbeck, University of London"
__author__ = "Birkbeck Centre for Technology and Publishing"
__license__ = "AGPL v3"
__maintainer__ = "Birkbeck Centre for Technology and Publishing"
from contextlib import contextmanager
from django.db import models, IntegrityError, transaction
from django.db.models import fields
from django.db.models.fields.related import ForeignObjectRel, ManyToManyField
from django.db.models.fields.related_descriptors import (
create_forward_many_to_many_manager,
ManyToManyDescriptor,
)
from django.http.request import split_domain_port
from django.utils.functional import cached_property
from utils import logic
class AbstractSiteModel(models.Model):
"""Adds site-like functionality to any model"""
SCHEMES = {
True: "https",
False: "http",
}
domain = models.CharField(
max_length=255, default="www.example.com", unique=True)
is_secure = models.BooleanField(
default=False,
help_text="If the site should redirect to HTTPS, mark this.",
)
class Meta:
abstract = True
@classmethod
def get_by_request(cls, request):
domain = request.get_host()
# Lookup by domain with/without port
try:
obj = cls.objects.get(domain=domain)
except cls.DoesNotExist:
# Lookup without port
domain, _port = split_domain_port(domain)
obj = cls.objects.get(domain=domain)
return obj
def site_url(self, path=None):
return logic.build_url(
netloc=self.domain,
scheme=self.SCHEMES[self.is_secure],
path=path or "",
)
class PGCaseInsensitivedMixin():
"""Activates the citext postgres extension for the given field"""
def db_type(self, connection):
if connection.vendor == "postgresql":
return "citext"
elif connection.vendor == "sqlite":
return "text collate nocase"
else:
return super().db_type(connection)
class PGCaseInsensitiveEmailField(PGCaseInsensitivedMixin, models.EmailField):
pass
def merge_models(src, dest):
""" Moves relations from `src` to `dest` and deletes src
:param src: Model instance to be removed
:param dest: Model instance into which src will be merged
"""
model = src._meta.model
if dest._meta.model != model:
raise TypeError("Can't merge a %s with a %s" % (model, dest._meta.model))
fields = src._meta.get_fields()
for field in fields:
if field.many_to_many:
# These can be ManyToManyRel or ManyToManyField depending on the
# direction the relationship was declared.
if isinstance(field, models.Field):
related_model = field.related_model
remote_field = field.remote_field.name
manager = getattr(dest, field.get_attname())
else:
accessor = getattr(src, field.get_accessor_name())
manager = getattr(dest, field.get_accessor_name())
# Query all related objects via through, in case there is a custom
# Through model
remote_field = manager.source_field_name
related_filter = {remote_field: src}
objects = manager.through.objects.filter(**related_filter)
elif field.one_to_many:
remote_field = field.remote_field.name
accessor_name = field.get_accessor_name()
accessor = getattr(src, accessor_name)
objects = accessor.all()
for obj in objects:
try:
with transaction.atomic():
setattr(obj, remote_field, dest)
obj.save()
except IntegrityError:
# Ignore unique constraint violations
pass
src.delete()
class M2MOrderedThroughField(ManyToManyField):
""" Orders m2m related objects by their 'through' Model
When a 'through' model declares an ordering in its Meta
options, it is ignored by Django's default manager.
This field adds the through model to the ordering logic
of the manager so that if the through model declares
an ordering logic, it will be used in the join query
"""
def contribute_to_class(self, cls, *args, **kwargs):
super_return = super().contribute_to_class(cls, *args, **kwargs)
setattr(cls, self.name, M2MOrderedThroughDescriptor(self.remote_field, reverse=False))
return super_return
class M2MOrderedThroughDescriptor(ManyToManyDescriptor):
@cached_property
def related_manager_cls(self):
related_model = self.rel.related_model if self.reverse else self.rel.model
related_manager = create_forward_many_to_many_manager(
related_model._default_manager.__class__,
self.rel,
reverse=self.reverse,
)
return create_m2m_ordered_through_manager(related_manager, self.rel)
@contextmanager
def allow_m2m_operation(through):
""" Enables m2m operations on through models
This is done by flagging the model as auto_created dynamically. It only
works if all your extra fields on the through model have defaults declared.
"""
cached = through._meta.auto_created
through._meta.auto_created = True
try:
yield
finally:
through._meta.auto_created = cached
def create_m2m_ordered_through_manager(related_manager, rel):
class M2MOrderedThroughManager(related_manager):
def _apply_ordering(self, queryset):
# Check for custom related name (there should be a
# .get_related_name() but I can't find anything like it)
related_name = self.source_field.remote_field.related_name
if not related_name:
related_name = self.through._meta.model_name
# equivalent of my_object.relation.all().order_by(related_name)
return queryset.extra(order_by=[related_name])
def get_queryset(self, *args, **kwargs):
""" Here is where we can finally apply our ordering logic"""
qs = super().get_queryset(*args, **kwargs)
return self._apply_ordering(qs)
def add(self, *objs):
with allow_m2m_operation(rel.through):
return super().add(*objs)
def remove(self, *objs):
with allow_m2m_operation(rel.through):
return super().remove(*objs)
def clear(self):
with allow_m2m_operation(rel.through):
return super().clear()
return M2MOrderedThroughManager
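# Illustrative sketch only (hypothetical models, not part of this module):
# how M2MOrderedThroughField is intended to be declared so that the through
# model's Meta.ordering drives the order of the related queryset, per the
# class docstring above.
#
#   class ArticleOrdering(models.Model):
#       issue = models.ForeignKey("Issue", on_delete=models.CASCADE)
#       article = models.ForeignKey("Article", on_delete=models.CASCADE)
#       order = models.PositiveIntegerField(default=0)
#
#       class Meta:
#           ordering = ("order",)
#
#   class Issue(models.Model):
#       articles = M2MOrderedThroughField(
#           "Article", through="ArticleOrdering", related_name="issues")
#
#   # issue.articles.all() now comes back ordered by ArticleOrdering.order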
|
Lekanich/intellij-community
|
refs/heads/master
|
python/testData/inspections/ReplaceExecComment_after.py
|
79
|
exec(1) # <- doesn't work either
|
xe1gyq/eekmex
|
refs/heads/master
|
sandbox/core/alive.py
|
1
|
#!/usr/bin/python
import logging
from system import System
class Alive(object):
def __init__(self):
self.system = System()
logging.info('Alive Initialization Succeeded!')
def data(self):
cpu = self.system.cpu()
memory = self.system.memory()
message = "Cpu %s / Memory %s" % (cpu, memory)
logging.info(message)
# End of File
|
szilveszter/django
|
refs/heads/master
|
django/core/management/__init__.py
|
7
|
from __future__ import unicode_literals
import collections
from importlib import import_module
import os
import sys
import django
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import (BaseCommand, CommandError,
CommandParser, handle_default_options)
from django.core.management.color import color_style
from django.utils import lru_cache
from django.utils import six
def find_commands(management_dir):
"""
Given a path to a management directory, returns a list of all the command
names that are available.
Returns an empty list if no commands are defined.
"""
command_dir = os.path.join(management_dir, 'commands')
try:
return [f[:-3] for f in os.listdir(command_dir)
if not f.startswith('_') and f.endswith('.py')]
except OSError:
return []
def load_command_class(app_name, name):
"""
Given a command name and an application name, returns the Command
class instance. All errors raised by the import process
(ImportError, AttributeError) are allowed to propagate.
"""
module = import_module('%s.management.commands.%s' % (app_name, name))
return module.Command()
@lru_cache.lru_cache(maxsize=None)
def get_commands():
"""
Returns a dictionary mapping command names to their callback applications.
This works by looking for a management.commands package in django.core, and
in each installed application -- if a commands package exists, all commands
in that package are registered.
Core commands are always included. If a settings module has been
specified, user-defined commands will also be included.
The dictionary is in the format {command_name: app_name}. Key-value
pairs from this dictionary can then be used in calls to
load_command_class(app_name, command_name)
If a specific version of a command must be loaded (e.g., with the
startapp command), the instantiated module can be placed in the
dictionary in place of the application name.
The dictionary is cached on the first call and reused on subsequent
calls.
"""
commands = {name: 'django.core' for name in find_commands(__path__[0])}
if not settings.configured:
return commands
for app_config in reversed(list(apps.get_app_configs())):
path = os.path.join(app_config.path, 'management')
commands.update({name: app_config.name for name in find_commands(path)})
return commands
def call_command(name, *args, **options):
"""
Calls the given command, with the given options and args/kwargs.
This is the primary API you should use for calling specific commands.
Some examples:
call_command('syncdb')
call_command('shell', plain=True)
call_command('sqlall', 'myapp')
"""
# Load the command object.
try:
app_name = get_commands()[name]
except KeyError:
raise CommandError("Unknown command: %r" % name)
if isinstance(app_name, BaseCommand):
# If the command is already loaded, use it directly.
command = app_name
else:
command = load_command_class(app_name, name)
# Simulate argument parsing to get the option defaults (see #10080 for details).
parser = command.create_parser('', name)
if command.use_argparse:
# Use the `dest` option name from the parser option
opt_mapping = dict((sorted(s_opt.option_strings)[0].lstrip('-').replace('-', '_'), s_opt.dest)
for s_opt in parser._actions if s_opt.option_strings)
arg_options = dict((opt_mapping.get(key, key), value) for key, value in options.items())
defaults = parser.parse_args(args=args)
defaults = dict(defaults._get_kwargs(), **arg_options)
# Move positional args out of options to mimic legacy optparse
args = defaults.pop('args', ())
else:
# Legacy optparse method
defaults, _ = parser.parse_args(args=[])
defaults = dict(defaults.__dict__, **options)
return command.execute(*args, **defaults)
class ManagementUtility(object):
"""
Encapsulates the logic of the django-admin and manage.py utilities.
A ManagementUtility has a number of commands, which can be manipulated
by editing the self.commands dictionary.
"""
def __init__(self, argv=None):
self.argv = argv or sys.argv[:]
self.prog_name = os.path.basename(self.argv[0])
self.settings_exception = None
def main_help_text(self, commands_only=False):
"""
Returns the script's main help text, as a string.
"""
if commands_only:
usage = sorted(get_commands().keys())
else:
usage = [
"",
"Type '%s help <subcommand>' for help on a specific subcommand." % self.prog_name,
"",
"Available subcommands:",
]
commands_dict = collections.defaultdict(lambda: [])
for name, app in six.iteritems(get_commands()):
if app == 'django.core':
app = 'django'
else:
app = app.rpartition('.')[-1]
commands_dict[app].append(name)
style = color_style()
for app in sorted(commands_dict.keys()):
usage.append("")
usage.append(style.NOTICE("[%s]" % app))
for name in sorted(commands_dict[app]):
usage.append(" %s" % name)
# Output an extra note if settings are not properly configured
if self.settings_exception is not None:
usage.append(style.NOTICE(
"Note that only Django core commands are listed "
"as settings are not properly configured (error: %s)."
% self.settings_exception))
return '\n'.join(usage)
def fetch_command(self, subcommand):
"""
Tries to fetch the given subcommand, printing a message with the
appropriate command called from the command line (usually
"django-admin" or "manage.py") if it can't be found.
"""
# Get commands outside of try block to prevent swallowing exceptions
commands = get_commands()
try:
app_name = commands[subcommand]
except KeyError:
# This might trigger ImproperlyConfigured (masked in get_commands)
settings.INSTALLED_APPS
sys.stderr.write("Unknown command: %r\nType '%s help' for usage.\n" %
(subcommand, self.prog_name))
sys.exit(1)
if isinstance(app_name, BaseCommand):
# If the command is already loaded, use it directly.
klass = app_name
else:
klass = load_command_class(app_name, subcommand)
return klass
def autocomplete(self):
"""
Output completion suggestions for BASH.
The output of this function is passed to BASH's `COMPREPLY` variable and
treated as completion suggestions. `COMPREPLY` expects a space-separated
string as the result.
The `COMP_WORDS` and `COMP_CWORD` BASH environment variables are used
to get information about the cli input. Please refer to the BASH
man-page for more information about these variables.
Subcommand options are saved as pairs. A pair consists of
the long option string (e.g. '--exclude') and a boolean
value indicating if the option requires arguments. When printing to
stdout, an equal sign is appended to options which require arguments.
Note: If debugging this function, it is recommended to write the debug
output in a separate file. Otherwise the debug output will be treated
and formatted as potential completion suggestions.
"""
# Don't complete if user hasn't sourced bash_completion file.
if 'DJANGO_AUTO_COMPLETE' not in os.environ:
return
cwords = os.environ['COMP_WORDS'].split()[1:]
cword = int(os.environ['COMP_CWORD'])
try:
curr = cwords[cword - 1]
except IndexError:
curr = ''
subcommands = list(get_commands()) + ['help']
options = [('--help', None)]
# subcommand
if cword == 1:
print(' '.join(sorted(filter(lambda x: x.startswith(curr), subcommands))))
# subcommand options
# special case: the 'help' subcommand has no options
elif cwords[0] in subcommands and cwords[0] != 'help':
subcommand_cls = self.fetch_command(cwords[0])
# special case: 'runfcgi' stores additional options as
# 'key=value' pairs
if cwords[0] == 'runfcgi':
from django.core.servers.fastcgi import FASTCGI_OPTIONS
options += [(k, 1) for k in FASTCGI_OPTIONS]
# special case: add the names of installed apps to options
elif cwords[0] in ('dumpdata', 'sql', 'sqlall', 'sqlclear',
'sqlcustom', 'sqlindexes', 'sqlsequencereset', 'test'):
try:
app_configs = apps.get_app_configs()
# Get the last part of the dotted path as the app name.
options += [(app_config.label, 0) for app_config in app_configs]
except ImportError:
# Fail silently if DJANGO_SETTINGS_MODULE isn't set. The
# user will find out once they execute the command.
pass
parser = subcommand_cls.create_parser('', cwords[0])
if subcommand_cls.use_argparse:
options += [(sorted(s_opt.option_strings)[0], s_opt.nargs != 0) for s_opt in
parser._actions if s_opt.option_strings]
else:
options += [(s_opt.get_opt_string(), s_opt.nargs) for s_opt in
parser.option_list]
# filter out previously specified options from available options
prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]
options = [opt for opt in options if opt[0] not in prev_opts]
# filter options by current input
options = sorted((k, v) for k, v in options if k.startswith(curr))
for option in options:
opt_label = option[0]
# append '=' to options which require args
if option[1]:
opt_label += '='
print(opt_label)
sys.exit(1)
def execute(self):
"""
Given the command-line arguments, this figures out which subcommand is
being run, creates a parser appropriate to that command, and runs it.
"""
try:
subcommand = self.argv[1]
except IndexError:
subcommand = 'help' # Display help if no arguments were given.
# Preprocess options to extract --settings and --pythonpath.
# These options could affect the commands that are available, so they
# must be processed early.
parser = CommandParser(None, usage="%(prog)s subcommand [options] [args]", add_help=False)
parser.add_argument('--settings')
parser.add_argument('--pythonpath')
parser.add_argument('args', nargs='*') # catch-all
try:
options, args = parser.parse_known_args(self.argv[2:])
handle_default_options(options)
except CommandError:
pass # Ignore any option errors at this point.
no_settings_commands = [
'help', 'version', '--help', '--version', '-h',
'compilemessages', 'makemessages',
'startapp', 'startproject',
]
try:
settings.INSTALLED_APPS
except ImproperlyConfigured as exc:
self.settings_exception = exc
# A handful of built-in management commands work without settings.
# Load the default settings -- where INSTALLED_APPS is empty.
if subcommand in no_settings_commands:
settings.configure()
if settings.configured:
django.setup()
self.autocomplete()
if subcommand == 'help':
if '--commands' in args:
sys.stdout.write(self.main_help_text(commands_only=True) + '\n')
elif len(options.args) < 1:
sys.stdout.write(self.main_help_text() + '\n')
else:
self.fetch_command(options.args[0]).print_help(self.prog_name, options.args[0])
# Special-cases: We want 'django-admin --version' and
# 'django-admin --help' to work, for backwards compatibility.
elif subcommand == 'version' or self.argv[1:] == ['--version']:
sys.stdout.write(django.get_version() + '\n')
elif self.argv[1:] in (['--help'], ['-h']):
sys.stdout.write(self.main_help_text() + '\n')
else:
self.fetch_command(subcommand).run_from_argv(self.argv)
def execute_from_command_line(argv=None):
"""
A simple method that runs a ManagementUtility.
"""
utility = ManagementUtility(argv)
utility.execute()
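# For reference (illustrative, not part of this module): a generated
# manage.py typically drives the utility like this, with "mysite.settings"
# standing in for the project's settings module:
#
#   import os
#   import sys
#   from django.core.management import execute_from_command_line
#
#   os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mysite.settings")
#   execute_from_command_line(sys.argv)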
|
openego/ego.io
|
refs/heads/dev
|
setup.py
|
1
|
#! /usr/bin/env python
# coding: utf-8
from setuptools import find_packages, setup
setup(name='egoio',
author='NEXT ENERGY, Reiner Lemoine Institut gGmbH, ZNES',
author_email='ulf.p.mueller@hs-flensburg.de',
description='ego input/output repository',
version='0.4.8',
url='https://github.com/openego/ego.io',
packages=find_packages(),
license='GNU Affero General Public License v3.0',
install_requires=[
'geoalchemy2 >= 0.3.0',
'sqlalchemy >= 1.2.0',
'keyring >= 4.0',
'keyrings.alt',
'psycopg2-binary',
'oedialect',
'numpy'],
extras_require={
"sqlalchemy": 'postgresql'},
package_data={'tools': ['sqlacodegen_oedb.sh']}
)
|
bmya/addons-yelizariev
|
refs/heads/8.0
|
reminder_task_deadline/models.py
|
16
|
from openerp import models
class task(models.Model):
_name = 'project.task'
_inherit = ['project.task', 'reminder']
_reminder_date_field = 'date_deadline'
_reminder_attendees_fields = ['user_id', 'reviewer_id']
|
gisodal/vimgdb
|
refs/heads/master
|
vimgdb/__init__.py
|
1
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from .vimgdb import Vimgdb
from .version import Version
|
tempbottle/kbengine
|
refs/heads/master
|
kbe/res/scripts/common/Lib/macpath.py
|
72
|
"""Pathname and path-related operations for the Macintosh."""
import os
from stat import *
import genericpath
from genericpath import *
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
"basename","dirname","commonprefix","getsize","getmtime",
"getatime","getctime", "islink","exists","lexists","isdir","isfile",
"expanduser","expandvars","normpath","abspath",
"curdir","pardir","sep","pathsep","defpath","altsep","extsep",
"devnull","realpath","supports_unicode_filenames"]
# strings representing various path-related bits and pieces
# These are primarily for export; internally, they are hardcoded.
curdir = ':'
pardir = '::'
extsep = '.'
sep = ':'
pathsep = '\n'
defpath = ':'
altsep = None
devnull = 'Dev:Null'
def _get_colon(path):
if isinstance(path, bytes):
return b':'
else:
return ':'
# Normalize the case of a pathname. Dummy in Posix, but <s>.lower() here.
def normcase(path):
if not isinstance(path, (bytes, str)):
raise TypeError("normcase() argument must be str or bytes, "
"not '{}'".format(path.__class__.__name__))
return path.lower()
def isabs(s):
"""Return true if a path is absolute.
On the Mac, relative paths begin with a colon,
but as a special case, paths with no colons at all are also relative.
Anything else is absolute (the string up to the first colon is the
volume name)."""
colon = _get_colon(s)
return colon in s and s[:1] != colon
def join(s, *p):
colon = _get_colon(s)
path = s
for t in p:
if (not s) or isabs(t):
path = t
continue
if t[:1] == colon:
t = t[1:]
if colon not in path:
path = colon + path
if path[-1:] != colon:
path = path + colon
path = path + t
return path
def split(s):
"""Split a pathname into two parts: the directory leading up to the final
bit, and the basename (the filename, without colons, in that directory).
The result (s, t) is such that join(s, t) yields the original argument."""
colon = _get_colon(s)
if colon not in s: return s[:0], s
col = 0
for i in range(len(s)):
if s[i:i+1] == colon: col = i + 1
path, file = s[:col-1], s[col:]
if path and not colon in path:
path = path + colon
return path, file
def splitext(p):
if isinstance(p, bytes):
return genericpath._splitext(p, b':', altsep, b'.')
else:
return genericpath._splitext(p, sep, altsep, extsep)
splitext.__doc__ = genericpath._splitext.__doc__
def splitdrive(p):
"""Split a pathname into a drive specification and the rest of the
path. Useful on DOS/Windows/NT; on the Mac, the drive is always
empty (don't use the volume name -- it doesn't have the same
syntactic and semantic oddities as DOS drive letters, such as there
being a separate current directory per drive)."""
return p[:0], p
# Short interfaces to split()
def dirname(s): return split(s)[0]
def basename(s): return split(s)[1]
def ismount(s):
if not isabs(s):
return False
components = split(s)
return len(components) == 2 and not components[1]
def islink(s):
"""Return true if the pathname refers to a symbolic link."""
try:
import Carbon.File
return Carbon.File.ResolveAliasFile(s, 0)[2]
except:
return False
# Is `stat`/`lstat` a meaningful difference on the Mac? This is safe in any
# case.
def lexists(path):
"""Test whether a path exists. Returns True for broken symbolic links"""
try:
st = os.lstat(path)
except OSError:
return False
return True
def expandvars(path):
"""Dummy to retain interface-compatibility with other operating systems."""
return path
def expanduser(path):
"""Dummy to retain interface-compatibility with other operating systems."""
return path
class norm_error(Exception):
"""Path cannot be normalized"""
def normpath(s):
"""Normalize a pathname. Will return the same result for
equivalent paths."""
colon = _get_colon(s)
if colon not in s:
return colon + s
comps = s.split(colon)
i = 1
while i < len(comps)-1:
if not comps[i] and comps[i-1]:
if i > 1:
del comps[i-1:i+1]
i = i - 1
else:
# best way to handle this is to raise an exception
raise norm_error('Cannot use :: immediately after volume name')
else:
i = i + 1
s = colon.join(comps)
# remove trailing ":" except for ":" and "Volume:"
if s[-1:] == colon and len(comps) > 2 and s != colon*len(s):
s = s[:-1]
return s
def abspath(path):
"""Return an absolute path."""
if not isabs(path):
if isinstance(path, bytes):
cwd = os.getcwdb()
else:
cwd = os.getcwd()
path = join(cwd, path)
return normpath(path)
# realpath is a no-op on systems without islink support
def realpath(path):
path = abspath(path)
try:
import Carbon.File
except ImportError:
return path
if not path:
return path
colon = _get_colon(path)
components = path.split(colon)
path = components[0] + colon
for c in components[1:]:
path = join(path, c)
try:
path = Carbon.File.FSResolveAliasFile(path, 1)[0].as_pathname()
except Carbon.File.Error:
pass
return path
supports_unicode_filenames = True
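if __name__ == "__main__":
    # Illustrative checks (an addition, not part of the original module) of
    # the classic Mac path conventions implemented above: a leading colon
    # means a relative path, and colons separate path components.
    assert isabs("Volume:Folder:file")
    assert not isabs(":Folder:file")
    assert join("Volume:Folder", "file") == "Volume:Folder:file"
    assert split("Volume:Folder:file") == ("Volume:Folder", "file")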
|
samthetechie/pyFolia
|
refs/heads/master
|
venv/lib/python2.7/sre.py
|
4
|
/usr/lib/python2.7/sre.py
|
bxlab/HiFive_Paper
|
refs/heads/master
|
Scripts/HiCLib/bx-python-0.7.1/build/lib.linux-x86_64-2.7/bx/intervals/operations/join.py
|
6
|
"""
Join two sets of intervals using their overlap as the key. The
intervals MUST be sorted by chrom(lexicographically),
start(arithmetically) and end(arithmetically). This works by simply
walking through the inputs in O(n) time.
"""
import psyco_full
import math
import traceback
import fileinput
from warnings import warn
from bx.intervals.io import *
from bx.intervals.operations import *
from quicksect import IntervalTree
def join(leftSet, rightSet, mincols=1, leftfill=True, rightfill=True):
# Read rightSet into memory:
rightlen = 0
leftlen = 0
rightTree = IntervalTree()
for item in rightSet:
if isinstance(item, GenomicInterval):
rightTree.insert( item, rightSet.linenum, item.fields )
if rightlen == 0: rightlen = item.nfields
for interval in leftSet:
if leftlen == 0 and isinstance(interval, GenomicInterval):
leftlen = interval.nfields
if not isinstance(interval, GenomicInterval):
yield interval
else:
result = []
rightTree.intersect( interval, lambda node: result.append( node ) )
overlap_not_met = 0
for item in result:
if item.start in range(interval.start,interval.end+1) and item.end not in range(interval.start,interval.end+1):
overlap = interval.end-item.start
elif item.end in range(interval.start,interval.end+1) and item.start not in range(interval.start,interval.end+1):
overlap = item.end-interval.start
elif item.start in range(interval.start,interval.end+1) and item.end in range(interval.start,interval.end+1):
overlap = item.end-item.start
else: #the intersecting item's start and end are outside the interval range
overlap = interval.end-interval.start
if overlap < mincols:
overlap_not_met += 1
continue
outfields = list(interval)
map(outfields.append, item.other)
setattr( item, "visited", True )
yield outfields
if (len(result) == 0 or overlap_not_met == len(result)) and rightfill:
outfields = list(interval)
for x in range(rightlen): outfields.append(".")
yield outfields
if leftfill:
def report_unvisited( node, results ):
if not hasattr(node, "visited"):
results.append( node )
results = []
rightTree.traverse( lambda x: report_unvisited( x, results ) )
for item in results:
outfields = list()
for x in range(leftlen): outfields.append(".")
map(outfields.append, item.other)
yield outfields
def interval_cmp(a, b):
interval1 = a[0]
interval2 = b[0]
if not (isinstance(interval1, GenomicInterval) and isinstance(interval2, GenomicInterval)):
return 0
# Both are intervals
if interval1.chrom == interval2.chrom:
center1 = interval1.start + ((interval1.end - interval1.start) / 2)
center2 = interval2.start + ((interval2.end - interval2.start) / 2)
return center1 - center2
else:
if interval1.chrom > interval2.chrom:
return 1
else:
return -1
return 0
def findintersect(interval, sortedlist, mincols):
# find range of intervals that intersect via a binary search
# find lower bound
x = len(sortedlist) / 2
n = int(math.pow(2,math.ceil(math.log(len(sortedlist),2))))
not_found = True
not_done = True
while not_found and not_done:
n = n / 2
if n == 0:
n = 1
not_done = False
if x >= len(sortedlist):
x -= n
elif x < 0:
x += n
else:
if findoverlap(sortedlist[x][0], interval) >= mincols:
not_found = False
else:
comp = interval_cmp(sortedlist[x], [interval, 0])
if comp > 0:
x -= n
else:
x += n
print "\t".join(sortedlist[x][0].fields)
print "not_found = " + str(not_found)
if not_found:
return 0,-1
lowerbound = x
middlebound = x
upperbound = x
while (lowerbound > -1) and (findoverlap(sortedlist[lowerbound-1][0],interval) >= mincols):
lowerbound -= 1
while (upperbound+1 < len(sortedlist)) and (findoverlap(sortedlist[upperbound+1][0],interval) >= mincols):
upperbound += 1
return lowerbound, upperbound
def findoverlap(a, b):
# overlapping
if a.chrom == b.chrom:
return min(a.end, b.end) - max(a.start, b.start)
else:
return 0
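# Illustrative sketch (not part of the original module): findoverlap()
# measures the overlap in bases between two interval-like objects, so for
# objects exposing .chrom/.start/.end attributes:
#
#   findoverlap(Interval("chr1", 0, 100), Interval("chr1", 50, 150)) -> 50
#   findoverlap(Interval("chr1", 0, 10), Interval("chr2", 0, 10))    -> 0
#
# where "Interval" stands for any object with those three attributes.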
|
realsaiko/odoo
|
refs/heads/8.0
|
addons/hr_contract/__init__.py
|
381
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-Today OpenERP SA (<http://www.openerp.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_contract
import base_action_rule
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
kenshay/ImageScripter
|
refs/heads/master
|
ProgramData/SystemFiles/Python/Lib/site-packages/numpy/matrixlib/defmatrix.py
|
42
|
from __future__ import division, absolute_import, print_function
__all__ = ['matrix', 'bmat', 'mat', 'asmatrix']
import sys
import numpy.core.numeric as N
from numpy.core.numeric import concatenate, isscalar, binary_repr, identity, asanyarray
from numpy.core.numerictypes import issubdtype
# make translation table
_numchars = '0123456789.-+jeEL'
if sys.version_info[0] >= 3:
class _NumCharTable:
def __getitem__(self, i):
if chr(i) in _numchars:
return chr(i)
else:
return None
_table = _NumCharTable()
def _eval(astr):
str_ = astr.translate(_table)
if not str_:
raise TypeError("Invalid data string supplied: " + astr)
else:
return eval(str_)
else:
_table = [None]*256
for k in range(256):
_table[k] = chr(k)
_table = ''.join(_table)
_todelete = []
for k in _table:
if k not in _numchars:
_todelete.append(k)
_todelete = ''.join(_todelete)
del k
def _eval(astr):
str_ = astr.translate(_table, _todelete)
if not str_:
raise TypeError("Invalid data string supplied: " + astr)
else:
return eval(str_)
def _convert_from_string(data):
rows = data.split(';')
newdata = []
count = 0
for row in rows:
trow = row.split(',')
newrow = []
for col in trow:
temp = col.split()
newrow.extend(map(_eval, temp))
if count == 0:
Ncols = len(newrow)
elif len(newrow) != Ncols:
raise ValueError("Rows not the same size.")
count += 1
newdata.append(newrow)
return newdata
def asmatrix(data, dtype=None):
"""
Interpret the input as a matrix.
Unlike `matrix`, `asmatrix` does not make a copy if the input is already
a matrix or an ndarray. Equivalent to ``matrix(data, copy=False)``.
Parameters
----------
data : array_like
Input data.
dtype : data-type
Data-type of the output matrix.
Returns
-------
mat : matrix
`data` interpreted as a matrix.
Examples
--------
>>> x = np.array([[1, 2], [3, 4]])
>>> m = np.asmatrix(x)
>>> x[0,0] = 5
>>> m
matrix([[5, 2],
[3, 4]])
"""
return matrix(data, dtype=dtype, copy=False)
def matrix_power(M, n):
"""
Raise a square matrix to the (integer) power `n`.
For positive integers `n`, the power is computed by repeated matrix
squarings and matrix multiplications. If ``n == 0``, the identity matrix
of the same shape as M is returned. If ``n < 0``, the inverse
is computed and then raised to the ``abs(n)``.
Parameters
----------
M : ndarray or matrix object
Matrix to be "powered." Must be square, i.e. ``M.shape == (m, m)``,
with `m` a positive integer.
n : int
The exponent can be any integer or long integer, positive,
negative, or zero.
Returns
-------
M**n : ndarray or matrix object
The return value is the same shape and type as `M`;
if the exponent is positive or zero then the type of the
elements is the same as those of `M`. If the exponent is
negative the elements are floating-point.
Raises
------
LinAlgError
If the matrix is not numerically invertible.
See Also
--------
matrix
Provides an equivalent function as the exponentiation operator
(``**``, not ``^``).
Examples
--------
>>> from numpy import linalg as LA
>>> i = np.array([[0, 1], [-1, 0]]) # matrix equiv. of the imaginary unit
>>> LA.matrix_power(i, 3) # should = -i
array([[ 0, -1],
[ 1, 0]])
>>> LA.matrix_power(np.matrix(i), 3) # matrix arg returns matrix
matrix([[ 0, -1],
[ 1, 0]])
>>> LA.matrix_power(i, 0)
array([[1, 0],
[0, 1]])
>>> LA.matrix_power(i, -3) # should = 1/(-i) = i, but w/ f.p. elements
array([[ 0., 1.],
[-1., 0.]])
Somewhat more sophisticated example
>>> q = np.zeros((4, 4))
>>> q[0:2, 0:2] = -i
>>> q[2:4, 2:4] = i
>>> q # one of the three quaternion units not equal to 1
array([[ 0., -1., 0., 0.],
[ 1., 0., 0., 0.],
[ 0., 0., 0., 1.],
[ 0., 0., -1., 0.]])
>>> LA.matrix_power(q, 2) # = -np.eye(4)
array([[-1., 0., 0., 0.],
[ 0., -1., 0., 0.],
[ 0., 0., -1., 0.],
[ 0., 0., 0., -1.]])
"""
M = asanyarray(M)
if len(M.shape) != 2 or M.shape[0] != M.shape[1]:
raise ValueError("input must be a square array")
if not issubdtype(type(n), int):
raise TypeError("exponent must be an integer")
from numpy.linalg import inv
if n==0:
M = M.copy()
M[:] = identity(M.shape[0])
return M
elif n<0:
M = inv(M)
n *= -1
result = M
if n <= 3:
for _ in range(n-1):
result=N.dot(result, M)
return result
# binary decomposition to reduce the number of Matrix
# multiplications for n > 3.
beta = binary_repr(n)
Z, q, t = M, 0, len(beta)
while beta[t-q-1] == '0':
Z = N.dot(Z, Z)
q += 1
result = Z
for k in range(q+1, t):
Z = N.dot(Z, Z)
if beta[t-k-1] == '1':
result = N.dot(result, Z)
return result
class matrix(N.ndarray):
"""
matrix(data, dtype=None, copy=True)
Returns a matrix from an array-like object, or from a string of data.
A matrix is a specialized 2-D array that retains its 2-D nature
through operations. It has certain special operators, such as ``*``
(matrix multiplication) and ``**`` (matrix power).
Parameters
----------
data : array_like or string
If `data` is a string, it is interpreted as a matrix with commas
or spaces separating columns, and semicolons separating rows.
dtype : data-type
Data-type of the output matrix.
copy : bool
If `data` is already an `ndarray`, then this flag determines
whether the data is copied (the default), or whether a view is
constructed.
See Also
--------
array
Examples
--------
>>> a = np.matrix('1 2; 3 4')
>>> print(a)
[[1 2]
[3 4]]
>>> np.matrix([[1, 2], [3, 4]])
matrix([[1, 2],
[3, 4]])
"""
__array_priority__ = 10.0
def __new__(subtype, data, dtype=None, copy=True):
if isinstance(data, matrix):
dtype2 = data.dtype
if (dtype is None):
dtype = dtype2
if (dtype2 == dtype) and (not copy):
return data
return data.astype(dtype)
if isinstance(data, N.ndarray):
if dtype is None:
intype = data.dtype
else:
intype = N.dtype(dtype)
new = data.view(subtype)
if intype != data.dtype:
return new.astype(intype)
if copy: return new.copy()
else: return new
if isinstance(data, str):
data = _convert_from_string(data)
# now convert data to an array
arr = N.array(data, dtype=dtype, copy=copy)
ndim = arr.ndim
shape = arr.shape
if (ndim > 2):
raise ValueError("matrix must be 2-dimensional")
elif ndim == 0:
shape = (1, 1)
elif ndim == 1:
shape = (1, shape[0])
order = 'C'
if (ndim == 2) and arr.flags.fortran:
order = 'F'
if not (order or arr.flags.contiguous):
arr = arr.copy()
ret = N.ndarray.__new__(subtype, shape, arr.dtype,
buffer=arr,
order=order)
return ret
def __array_finalize__(self, obj):
self._getitem = False
if (isinstance(obj, matrix) and obj._getitem): return
ndim = self.ndim
if (ndim == 2):
return
if (ndim > 2):
newshape = tuple([x for x in self.shape if x > 1])
ndim = len(newshape)
if ndim == 2:
self.shape = newshape
return
elif (ndim > 2):
raise ValueError("shape too large to be a matrix.")
else:
newshape = self.shape
if ndim == 0:
self.shape = (1, 1)
elif ndim == 1:
self.shape = (1, newshape[0])
return
def __getitem__(self, index):
self._getitem = True
try:
out = N.ndarray.__getitem__(self, index)
finally:
self._getitem = False
if not isinstance(out, N.ndarray):
return out
if out.ndim == 0:
return out[()]
if out.ndim == 1:
sh = out.shape[0]
# Determine when we should have a column array
try:
n = len(index)
except:
n = 0
if n > 1 and isscalar(index[1]):
out.shape = (sh, 1)
else:
out.shape = (1, sh)
return out
def __mul__(self, other):
if isinstance(other, (N.ndarray, list, tuple)) :
# This promotes 1-D vectors to row vectors
return N.dot(self, asmatrix(other))
if isscalar(other) or not hasattr(other, '__rmul__') :
return N.dot(self, other)
return NotImplemented
def __rmul__(self, other):
return N.dot(other, self)
def __imul__(self, other):
self[:] = self * other
return self
def __pow__(self, other):
return matrix_power(self, other)
def __ipow__(self, other):
self[:] = self ** other
return self
def __rpow__(self, other):
return NotImplemented
def __repr__(self):
s = repr(self.__array__()).replace('array', 'matrix')
# now, 'matrix' has 6 letters, and 'array' 5, so the columns don't
# line up anymore. We need to add a space.
l = s.splitlines()
for i in range(1, len(l)):
if l[i]:
l[i] = ' ' + l[i]
return '\n'.join(l)
def __str__(self):
return str(self.__array__())
def _align(self, axis):
"""A convenience function for operations that need to preserve axis
orientation.
"""
if axis is None:
return self[0, 0]
elif axis==0:
return self
elif axis==1:
return self.transpose()
else:
raise ValueError("unsupported axis")
def _collapse(self, axis):
"""A convenience function for operations that want to collapse
to a scalar like _align, but are using keepdims=True
"""
if axis is None:
return self[0, 0]
else:
return self
# Necessary because base-class tolist expects dimension
# reduction by x[0]
def tolist(self):
"""
Return the matrix as a (possibly nested) list.
See `ndarray.tolist` for full documentation.
See Also
--------
ndarray.tolist
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.tolist()
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]
"""
return self.__array__().tolist()
# To preserve orientation of result...
def sum(self, axis=None, dtype=None, out=None):
"""
Returns the sum of the matrix elements, along the given axis.
Refer to `numpy.sum` for full documentation.
See Also
--------
numpy.sum
Notes
-----
This is the same as `ndarray.sum`, except that where an `ndarray` would
be returned, a `matrix` object is returned instead.
Examples
--------
>>> x = np.matrix([[1, 2], [4, 3]])
>>> x.sum()
10
>>> x.sum(axis=1)
matrix([[3],
[7]])
>>> x.sum(axis=1, dtype='float')
matrix([[ 3.],
[ 7.]])
>>> out = np.zeros((1, 2), dtype='float')
>>> x.sum(axis=1, dtype='float', out=out)
matrix([[ 3.],
[ 7.]])
"""
return N.ndarray.sum(self, axis, dtype, out, keepdims=True)._collapse(axis)
# To update docstring from array to matrix...
def squeeze(self, axis=None):
"""
Return a possibly reshaped matrix.
Refer to `numpy.squeeze` for more documentation.
Parameters
----------
axis : None or int or tuple of ints, optional
Selects a subset of the single-dimensional entries in the shape.
If an axis is selected with shape entry greater than one,
an error is raised.
Returns
-------
squeezed : matrix
The matrix, but as a (1, N) matrix if it had shape (N, 1).
See Also
--------
numpy.squeeze : related function
Notes
-----
If `m` has a single column then that column is returned
as the single row of a matrix. Otherwise `m` is returned.
The returned matrix is always either `m` itself or a view into `m`.
Supplying an axis keyword argument will not affect the returned matrix
but it may cause an error to be raised.
Examples
--------
>>> c = np.matrix([[1], [2]])
>>> c
matrix([[1],
[2]])
>>> c.squeeze()
matrix([[1, 2]])
>>> r = c.T
>>> r
matrix([[1, 2]])
>>> r.squeeze()
matrix([[1, 2]])
>>> m = np.matrix([[1, 2], [3, 4]])
>>> m.squeeze()
matrix([[1, 2],
[3, 4]])
"""
return N.ndarray.squeeze(self, axis=axis)
# To update docstring from array to matrix...
def flatten(self, order='C'):
"""
Return a flattened copy of the matrix.
All `N` elements of the matrix are placed into a single row.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
'C' means to flatten in row-major (C-style) order. 'F' means to
flatten in column-major (Fortran-style) order. 'A' means to
flatten in column-major order if `m` is Fortran *contiguous* in
memory, row-major order otherwise. 'K' means to flatten `m` in
the order the elements occur in memory. The default is 'C'.
Returns
-------
y : matrix
A copy of the matrix, flattened to a `(1, N)` matrix where `N`
is the number of elements in the original matrix.
See Also
--------
ravel : Return a flattened array.
flat : A 1-D flat iterator over the matrix.
Examples
--------
>>> m = np.matrix([[1,2], [3,4]])
>>> m.flatten()
matrix([[1, 2, 3, 4]])
>>> m.flatten('F')
matrix([[1, 3, 2, 4]])
"""
return N.ndarray.flatten(self, order=order)
def mean(self, axis=None, dtype=None, out=None):
"""
Returns the average of the matrix elements along the given axis.
Refer to `numpy.mean` for full documentation.
See Also
--------
numpy.mean
Notes
-----
Same as `ndarray.mean` except that, where that returns an `ndarray`,
this returns a `matrix` object.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3, 4)))
>>> x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.mean()
5.5
>>> x.mean(0)
matrix([[ 4., 5., 6., 7.]])
>>> x.mean(1)
matrix([[ 1.5],
[ 5.5],
[ 9.5]])
"""
return N.ndarray.mean(self, axis, dtype, out, keepdims=True)._collapse(axis)
def std(self, axis=None, dtype=None, out=None, ddof=0):
"""
Return the standard deviation of the array elements along the given axis.
Refer to `numpy.std` for full documentation.
See Also
--------
numpy.std
Notes
-----
This is the same as `ndarray.std`, except that where an `ndarray` would
be returned, a `matrix` object is returned instead.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3, 4)))
>>> x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.std()
3.4520525295346629
>>> x.std(0)
matrix([[ 3.26598632, 3.26598632, 3.26598632, 3.26598632]])
>>> x.std(1)
matrix([[ 1.11803399],
[ 1.11803399],
[ 1.11803399]])
"""
return N.ndarray.std(self, axis, dtype, out, ddof, keepdims=True)._collapse(axis)
def var(self, axis=None, dtype=None, out=None, ddof=0):
"""
Returns the variance of the matrix elements, along the given axis.
Refer to `numpy.var` for full documentation.
See Also
--------
numpy.var
Notes
-----
This is the same as `ndarray.var`, except that where an `ndarray` would
be returned, a `matrix` object is returned instead.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3, 4)))
>>> x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.var()
11.916666666666666
>>> x.var(0)
matrix([[ 10.66666667, 10.66666667, 10.66666667, 10.66666667]])
>>> x.var(1)
matrix([[ 1.25],
[ 1.25],
[ 1.25]])
"""
return N.ndarray.var(self, axis, dtype, out, ddof, keepdims=True)._collapse(axis)
def prod(self, axis=None, dtype=None, out=None):
"""
Return the product of the array elements over the given axis.
Refer to `prod` for full documentation.
See Also
--------
prod, ndarray.prod
Notes
-----
Same as `ndarray.prod`, except, where that returns an `ndarray`, this
returns a `matrix` object instead.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.prod()
0
>>> x.prod(0)
matrix([[ 0, 45, 120, 231]])
>>> x.prod(1)
matrix([[ 0],
[ 840],
[7920]])
"""
return N.ndarray.prod(self, axis, dtype, out, keepdims=True)._collapse(axis)
def any(self, axis=None, out=None):
"""
Test whether any array element along a given axis evaluates to True.
Refer to `numpy.any` for full documentation.
Parameters
----------
axis : int, optional
Axis along which logical OR is performed
out : ndarray, optional
Output to existing array instead of creating new one, must have
same shape as expected output
Returns
-------
any : bool, ndarray
Returns a single bool if `axis` is ``None``; otherwise,
returns `ndarray`
"""
return N.ndarray.any(self, axis, out, keepdims=True)._collapse(axis)
def all(self, axis=None, out=None):
"""
Test whether all matrix elements along a given axis evaluate to True.
Parameters
----------
See `numpy.all` for complete descriptions
See Also
--------
numpy.all
Notes
-----
This is the same as `ndarray.all`, but it returns a `matrix` object.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> y = x[0]; y
matrix([[0, 1, 2, 3]])
>>> (x == y)
matrix([[ True, True, True, True],
[False, False, False, False],
[False, False, False, False]], dtype=bool)
>>> (x == y).all()
False
>>> (x == y).all(0)
matrix([[False, False, False, False]], dtype=bool)
>>> (x == y).all(1)
matrix([[ True],
[False],
[False]], dtype=bool)
"""
return N.ndarray.all(self, axis, out, keepdims=True)._collapse(axis)
def max(self, axis=None, out=None):
"""
Return the maximum value along an axis.
Parameters
----------
See `amax` for complete descriptions
See Also
--------
amax, ndarray.max
Notes
-----
This is the same as `ndarray.max`, but returns a `matrix` object
where `ndarray.max` would return an ndarray.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.max()
11
>>> x.max(0)
matrix([[ 8, 9, 10, 11]])
>>> x.max(1)
matrix([[ 3],
[ 7],
[11]])
"""
return N.ndarray.max(self, axis, out, keepdims=True)._collapse(axis)
def argmax(self, axis=None, out=None):
"""
Indexes of the maximum values along an axis.
Return the indexes of the first occurrences of the maximum values
along the specified axis. If axis is None, the index is for the
flattened matrix.
Parameters
----------
See `numpy.argmax` for complete descriptions
See Also
--------
numpy.argmax
Notes
-----
This is the same as `ndarray.argmax`, but returns a `matrix` object
where `ndarray.argmax` would return an `ndarray`.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.argmax()
11
>>> x.argmax(0)
matrix([[2, 2, 2, 2]])
>>> x.argmax(1)
matrix([[3],
[3],
[3]])
"""
return N.ndarray.argmax(self, axis, out)._align(axis)
def min(self, axis=None, out=None):
"""
Return the minimum value along an axis.
Parameters
----------
See `amin` for complete descriptions.
See Also
--------
amin, ndarray.min
Notes
-----
This is the same as `ndarray.min`, but returns a `matrix` object
where `ndarray.min` would return an ndarray.
Examples
--------
>>> x = -np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, -1, -2, -3],
[ -4, -5, -6, -7],
[ -8, -9, -10, -11]])
>>> x.min()
-11
>>> x.min(0)
matrix([[ -8, -9, -10, -11]])
>>> x.min(1)
matrix([[ -3],
[ -7],
[-11]])
"""
return N.ndarray.min(self, axis, out, keepdims=True)._collapse(axis)
def argmin(self, axis=None, out=None):
"""
Indexes of the minimum values along an axis.
Return the indexes of the first occurrences of the minimum values
along the specified axis. If axis is None, the index is for the
flattened matrix.
Parameters
----------
See `numpy.argmin` for complete descriptions.
See Also
--------
numpy.argmin
Notes
-----
This is the same as `ndarray.argmin`, but returns a `matrix` object
where `ndarray.argmin` would return an `ndarray`.
Examples
--------
>>> x = -np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, -1, -2, -3],
[ -4, -5, -6, -7],
[ -8, -9, -10, -11]])
>>> x.argmin()
11
>>> x.argmin(0)
matrix([[2, 2, 2, 2]])
>>> x.argmin(1)
matrix([[3],
[3],
[3]])
"""
return N.ndarray.argmin(self, axis, out)._align(axis)
def ptp(self, axis=None, out=None):
"""
Peak-to-peak (maximum - minimum) value along the given axis.
Refer to `numpy.ptp` for full documentation.
See Also
--------
numpy.ptp
Notes
-----
Same as `ndarray.ptp`, except, where that would return an `ndarray` object,
this returns a `matrix` object.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.ptp()
11
>>> x.ptp(0)
matrix([[8, 8, 8, 8]])
>>> x.ptp(1)
matrix([[3],
[3],
[3]])
"""
return N.ndarray.ptp(self, axis, out)._align(axis)
def getI(self):
"""
Returns the (multiplicative) inverse of invertible `self`.
Parameters
----------
None
Returns
-------
ret : matrix object
If `self` is non-singular, `ret` is such that ``ret * self`` ==
``self * ret`` == ``np.matrix(np.eye(self[0,:].size)`` all return
``True``.
Raises
------
numpy.linalg.LinAlgError: Singular matrix
If `self` is singular.
See Also
--------
linalg.inv
Examples
--------
>>> m = np.matrix('[1, 2; 3, 4]'); m
matrix([[1, 2],
[3, 4]])
>>> m.getI()
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
>>> m.getI() * m
matrix([[ 1., 0.],
[ 0., 1.]])
"""
M, N = self.shape
if M == N:
from numpy.dual import inv as func
else:
from numpy.dual import pinv as func
return asmatrix(func(self))
def getA(self):
"""
Return `self` as an `ndarray` object.
Equivalent to ``np.asarray(self)``.
Parameters
----------
None
Returns
-------
ret : ndarray
`self` as an `ndarray`
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.getA()
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
"""
return self.__array__()
def getA1(self):
"""
Return `self` as a flattened `ndarray`.
Equivalent to ``np.asarray(x).ravel()``
Parameters
----------
None
Returns
-------
ret : ndarray
`self`, 1-D, as an `ndarray`
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.getA1()
array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
"""
return self.__array__().ravel()
def ravel(self, order='C'):
"""
Return a flattened matrix.
Refer to `numpy.ravel` for more documentation.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
The elements of `m` are read using this index order. 'C' means to
index the elements in C-like order, with the last axis index
changing fastest, back to the first axis index changing slowest.
'F' means to index the elements in Fortran-like index order, with
the first index changing fastest, and the last index changing
slowest. Note that the 'C' and 'F' options take no account of the
memory layout of the underlying array, and only refer to the order
of axis indexing. 'A' means to read the elements in Fortran-like
index order if `m` is Fortran *contiguous* in memory, C-like order
otherwise. 'K' means to read the elements in the order they occur
in memory, except for reversing the data when strides are negative.
By default, 'C' index order is used.
Returns
-------
ret : matrix
Return the matrix flattened to shape `(1, N)` where `N`
is the number of elements in the original matrix.
A copy is made only if necessary.
See Also
--------
matrix.flatten : returns a similar output matrix but always a copy
matrix.flat : a flat iterator on the array.
numpy.ravel : related function which returns an ndarray
"""
return N.ndarray.ravel(self, order=order)
def getT(self):
"""
Returns the transpose of the matrix.
Does *not* conjugate! For the complex conjugate transpose, use ``.H``.
Parameters
----------
None
Returns
-------
ret : matrix object
The (non-conjugated) transpose of the matrix.
See Also
--------
transpose, getH
Examples
--------
>>> m = np.matrix('[1, 2; 3, 4]')
>>> m
matrix([[1, 2],
[3, 4]])
>>> m.getT()
matrix([[1, 3],
[2, 4]])
"""
return self.transpose()
def getH(self):
"""
Returns the (complex) conjugate transpose of `self`.
Equivalent to ``np.transpose(self)`` if `self` is real-valued.
Parameters
----------
None
Returns
-------
ret : matrix object
complex conjugate transpose of `self`
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4)))
>>> z = x - 1j*x; z
matrix([[ 0. +0.j, 1. -1.j, 2. -2.j, 3. -3.j],
[ 4. -4.j, 5. -5.j, 6. -6.j, 7. -7.j],
[ 8. -8.j, 9. -9.j, 10.-10.j, 11.-11.j]])
>>> z.getH()
matrix([[ 0. +0.j, 4. +4.j, 8. +8.j],
[ 1. +1.j, 5. +5.j, 9. +9.j],
[ 2. +2.j, 6. +6.j, 10.+10.j],
[ 3. +3.j, 7. +7.j, 11.+11.j]])
"""
if issubclass(self.dtype.type, N.complexfloating):
return self.transpose().conjugate()
else:
return self.transpose()
T = property(getT, None)
A = property(getA, None)
A1 = property(getA1, None)
H = property(getH, None)
I = property(getI, None)
def _from_string(str, gdict, ldict):
rows = str.split(';')
rowtup = []
for row in rows:
trow = row.split(',')
newrow = []
for x in trow:
newrow.extend(x.split())
trow = newrow
coltup = []
for col in trow:
col = col.strip()
try:
thismat = ldict[col]
except KeyError:
try:
thismat = gdict[col]
except KeyError:
raise KeyError("%s not found" % (col,))
coltup.append(thismat)
rowtup.append(concatenate(coltup, axis=-1))
return concatenate(rowtup, axis=0)
def bmat(obj, ldict=None, gdict=None):
"""
Build a matrix object from a string, nested sequence, or array.
Parameters
----------
obj : str or array_like
Input data. Names of variables in the current scope may be
referenced, even if `obj` is a string.
ldict : dict, optional
A dictionary that replaces local operands in current frame.
Ignored if `obj` is not a string or `gdict` is `None`.
gdict : dict, optional
A dictionary that replaces global operands in current frame.
Ignored if `obj` is not a string.
Returns
-------
out : matrix
Returns a matrix object, which is a specialized 2-D array.
See Also
--------
matrix
Examples
--------
>>> A = np.mat('1 1; 1 1')
>>> B = np.mat('2 2; 2 2')
>>> C = np.mat('3 4; 5 6')
>>> D = np.mat('7 8; 9 0')
All the following expressions construct the same block matrix:
>>> np.bmat([[A, B], [C, D]])
matrix([[1, 1, 2, 2],
[1, 1, 2, 2],
[3, 4, 7, 8],
[5, 6, 9, 0]])
>>> np.bmat(np.r_[np.c_[A, B], np.c_[C, D]])
matrix([[1, 1, 2, 2],
[1, 1, 2, 2],
[3, 4, 7, 8],
[5, 6, 9, 0]])
>>> np.bmat('A,B; C,D')
matrix([[1, 1, 2, 2],
[1, 1, 2, 2],
[3, 4, 7, 8],
[5, 6, 9, 0]])
"""
if isinstance(obj, str):
if gdict is None:
# get previous frame
frame = sys._getframe().f_back
glob_dict = frame.f_globals
loc_dict = frame.f_locals
else:
glob_dict = gdict
loc_dict = ldict
return matrix(_from_string(obj, glob_dict, loc_dict))
if isinstance(obj, (tuple, list)):
# [[A,B],[C,D]]
arr_rows = []
for row in obj:
if isinstance(row, N.ndarray): # not 2-d
return matrix(concatenate(obj, axis=-1))
else:
arr_rows.append(concatenate(row, axis=-1))
return matrix(concatenate(arr_rows, axis=0))
if isinstance(obj, N.ndarray):
return matrix(obj)
mat = asmatrix
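# A minimal, illustrative sketch (not part of the original module) showing how
# `bmat` resolves names through the optional `ldict`/`gdict` mappings instead of
# the caller's frame; the block names 'P' and 'Q' are assumptions chosen only
# for this example.
def _bmat_dict_lookup_example():
    """Build a block matrix whose operands come from an explicit dictionary."""
    blocks = {'P': matrix('1 0; 0 1'), 'Q': matrix('2 2; 2 2')}
    # With `gdict` supplied, names in the string are looked up in `ldict` first,
    # then `gdict`, rather than in the calling frame.
    return bmat('P, Q; Q, P', ldict=blocks, gdict=blocks)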
|
jostep/tensorflow
|
refs/heads/master
|
tensorflow/contrib/tpu/python/tpu/tpu_sharding.py
|
30
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Helper library for sharding during TPU compilation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import tensor_shape
_DEFAULT_NUMBER_OF_SHARDS = 1
_DEFAULT_SHARD_DIMENSION = 0
# TODO(b/36777903) change other parts of tpu.py to use this class.
class ShardingPolicy(object):
"""An object use to hold the sharding policy for a Tensor.
"""
def __init__(self):
self._number_of_shards = None
self._shard_dimension = None
self._frozen = False
def __str__(self):
if self.number_of_shards is None or self.shard_dimension is None:
return "ShardingPolicy(unset)"
else:
return ("ShardingPolicy(%d shards dimension %d)" %
(self.number_of_shards, self.shard_dimension))
def _fill_default_values(self):
if self._number_of_shards is None:
self._number_of_shards = _DEFAULT_NUMBER_OF_SHARDS
if self._shard_dimension is None:
self._shard_dimension = tensor_shape.as_dimension(
_DEFAULT_SHARD_DIMENSION)
def freeze(self):
"""Prevents further modification to the sharding policy.
Any values that have not been set when freeze is called are set to
defaults. If the ShardingPolicy is already frozen, this is a NoOp.
"""
if not self._frozen:
self._fill_default_values()
self._frozen = True
@property
def number_of_shards(self):
"""Returns the number of shards in the policy or None if unspecified."""
return self._number_of_shards
def set_number_of_shards(self, number_of_shards):
"""Sets the number of shards for the current policy.
If the policy has been frozen then number_of_shards must match the
existing setting.
Args:
number_of_shards: The number of shards to use in the policy.
Raises:
ValueError: If the policy has been frozen and number_of_shards
differs from the frozen value; or number_of_shards <= 0.
"""
if self._frozen:
if self._number_of_shards != number_of_shards:
raise ValueError(
"Can't set sharding policy to use %d shards since it has been "
"frozen to use %d." % (number_of_shards, self._number_of_shards))
else:
if number_of_shards > 0:
self._number_of_shards = number_of_shards
else:
        raise ValueError(
            "Can't set sharding policy to use %s shards; value must be >0" %
            str(number_of_shards))
@property
def shard_dimension(self):
"""Returns the shard dimension of the policy or None if unspecified."""
return self._shard_dimension
def set_shard_dimension(self, shard_dimension):
"""Sets the shard dimension for the current policy.
If the policy has been frozen then shard_dimension must match the
existing setting.
Args:
shard_dimension: The shard dimension to use in the policy.
Raises:
ValueError: If the policy has been frozen and shard_dimension
differs from the frozen value, or shard_dimension can't be
interpreted as a Dimension.
"""
if self._frozen:
if self._shard_dimension != shard_dimension:
raise ValueError(
"Can't set shard dimension to %d since it has been frozen to "
"use %d." % (shard_dimension, self._shard_dimension))
else:
self._shard_dimension = tensor_shape.as_dimension(shard_dimension)
def merge(self, other):
"""Merges the policy of another policy into the current policy.
Args:
other: The policy to merge into this one.
Raises:
ValueError: If this policy has been frozen and the merge conflicts with
the frozen policy.
"""
if other.number_of_shards is not None:
self.set_number_of_shards(other.number_of_shards)
if other.shard_dimension is not None:
self.set_shard_dimension(other.shard_dimension)
def get_sharded_shape(self, shape, shard_index=None):
"""Returns the shape of a shard of a full Tensor.
When given the shape of a 'full-size' Tensor, returns the shape of
the sub-Tensor after it has been sharded. Freezes the policy if it
has not yet been frozen.
Args:
shape: The shape of the full-size Tensor to be sharded.
shard_index: The index of the shard whose shape should be returned.
shard_index can be None for sharding policies that use the same
shape for every shard.
Returns:
The shape of the sharded version of the Tensor.
Raises:
ValueError: If shard_index is None when shards are of different
shapes; or shard_index is not None and
!(0<=shard_index<number_of_shards); or shape does not have at
least self.shard_dimension+1 dimensions; or the value of
shape's shard dimension is not a multiple of
self.number_of_shards
"""
if self._shard_dimension is None or self._number_of_shards is None:
# Don't raise an error if the config is unset.
return None
if shard_index is not None:
if shard_index < 0 or shard_index >= self.number_of_shards:
raise ValueError("shard_index %d, but must be in [0,%d)." %
(shard_index, self._number_of_shards))
shape = tensor_shape.as_shape(shape)
if self._number_of_shards == 1:
# Don't do anything when there's only one shard.
return shape
ndims = shape.ndims
if ndims is None:
raise ValueError("shape must be a specified shape not Unknown")
if ndims <= self._shard_dimension:
raise ValueError("shape %s does not contain shard_dimension %d" %
(shape.as_list(), self._shard_dimension))
dims = shape.as_list()
if (dims[self._shard_dimension] % self._number_of_shards) != 0:
raise ValueError("shape %s cannot be sharded %d ways along dimension %d" %
(shape.as_list(), self._number_of_shards,
self._shard_dimension))
    # Integer division keeps the dimension an int under true-division semantics.
    dims[self._shard_dimension] //= self._number_of_shards
return tensor_shape.as_shape(dims)
def _unshard_shape(self, shape):
"""Return the unsharded shape that would generate a given sharded shape.
Args:
shape: the sharded shape to unshard
Returns:
The unsharded shape.
Raises:
ValueError: if shape is unknown or does not contain
self.shard_dimension
TypeError: if shape is not convertible to a TensorShape
"""
shape = tensor_shape.as_shape(shape)
if self._number_of_shards == 1:
# Don't do anything when there's only one shard.
return shape
ndims = shape.ndims
if ndims is None:
raise ValueError("shape must be a specified shape not Unknown")
if ndims <= self._shard_dimension:
raise ValueError("shape %s does not contain shard_dimension %d" %
(shape.as_list(), self._shard_dimension))
dims = shape.as_list()
dims[self._shard_dimension] *= self._number_of_shards
return tensor_shape.as_shape(dims)
def get_unsharded_shape(self, shapes):
"""Returns the shape of an unsharded Tensor given a list of shards.
When given a list of shapes of shards, returns the shape of the
unsharded Tensor that would generate the shards. Sets defaults for the
policy if number_of_shards or shard_dimension is None.
Args:
shapes: The shapes of the Tensor shards to be combined.
Returns:
The shape of the unsharded version of the Tensor.
Raises:
ValueError: if shapes is not a list of length
self.number_of_shards; or any element of shapes is not a valid
shape consistent with the sharding policy; or the list of
shapes is not a valid sharding of a full shape.
TypeError: if an element of shapes is not convertible to a
TensorShape
"""
self._fill_default_values()
if len(shapes) != self.number_of_shards:
raise ValueError(
"shapes is %s but must be a list of length number_of_shards=%d" % (
str(shapes), self.number_of_shards))
unsharded_shapes = [self._unshard_shape(s) for s in shapes]
for i in xrange(self.number_of_shards - 1):
if unsharded_shapes[i] != unsharded_shapes[self.number_of_shards - 1]:
raise ValueError(
"sharded shapes %s are not consistent shards of a full shape "
"sharded %d ways along dimension %d" % (
str(shapes), self.number_of_shards, self.shard_dimension))
return unsharded_shapes[0]
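# A minimal, illustrative sketch (not part of the original module) of the
# typical ShardingPolicy workflow; the tensor shape and shard count below are
# assumptions chosen only for demonstration.
def _sharding_policy_example():
  """Shard a [8, 128] tensor 4 ways along dimension 0 and recover its shape."""
  policy = ShardingPolicy()
  policy.set_number_of_shards(4)
  policy.set_shard_dimension(0)
  policy.freeze()  # no further changes allowed after this point
  shard_shape = policy.get_sharded_shape([8, 128])            # TensorShape([2, 128])
  full_shape = policy.get_unsharded_shape([shard_shape] * 4)  # TensorShape([8, 128])
  return shard_shape, full_shape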
|
andreyvit/pyjamas
|
refs/heads/master
|
examples/jsonrpc/public/services/simplejson/encoder.py
|
8
|
"""
Implementation of JSONEncoder
"""
import re
ESCAPE = re.compile(r'[\x00-\x19\\"\b\f\n\r\t]')
ESCAPE_ASCII = re.compile(r'([\\"/]|[^\ -~])')
ESCAPE_DCT = {
# escape all forward slashes to prevent </script> attack
'/': '\\/',
'\\': '\\\\',
'"': '\\"',
'\b': '\\b',
'\f': '\\f',
'\n': '\\n',
'\r': '\\r',
'\t': '\\t',
}
for i in range(20):
ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
def floatstr(o, allow_nan=True):
# Check for specials. Note that this type of test is processor- and/or
# platform-specific, so do tests which don't depend on the internals.
# assume this produces an infinity on all machines (probably not guaranteed)
INFINITY = 1e66666
if o != o:
text = 'NaN'
elif o == INFINITY:
text = 'Infinity'
elif o == -INFINITY:
text = '-Infinity'
else:
return str(o)
if not allow_nan:
raise ValueError("Out of range float values are not JSON compliant: %r"
% (o,))
return text
def encode_basestring(s):
"""
Return a JSON representation of a Python string
"""
def replace(match):
return ESCAPE_DCT[match.group(0)]
return '"' + ESCAPE.sub(replace, s) + '"'
def encode_basestring_ascii(s):
def replace(match):
s = match.group(0)
try:
return ESCAPE_DCT[s]
except KeyError:
return '\\u%04x' % (ord(s),)
return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
class JSONEncoder(object):
"""
Extensible JSON <http://json.org> encoder for Python data structures.
Supports the following objects and types by default:
+-------------------+---------------+
| Python | JSON |
+===================+===============+
| dict | object |
+-------------------+---------------+
| list, tuple | array |
+-------------------+---------------+
| str, unicode | string |
+-------------------+---------------+
| int, long, float | number |
+-------------------+---------------+
| True | true |
+-------------------+---------------+
| False | false |
+-------------------+---------------+
| None | null |
+-------------------+---------------+
    To extend this to recognize other objects, subclass and implement a
    ``.default()`` method that returns a serializable object for ``o`` if
    possible; otherwise it should call the superclass implementation
    (to raise ``TypeError``).
"""
__all__ = ['__init__', 'default', 'encode', 'iterencode']
def __init__(self, skipkeys=False, ensure_ascii=True,
check_circular=True, allow_nan=True, sort_keys=False, indent=None):
"""
Constructor for JSONEncoder, with sensible defaults.
If skipkeys is False, then it is a TypeError to attempt
encoding of keys that are not str, int, long, float or None. If
skipkeys is True, such items are simply skipped.
If ensure_ascii is True, the output is guaranteed to be str
objects with all incoming unicode characters escaped. If
        ensure_ascii is False, the output will be a unicode object.
If check_circular is True, then lists, dicts, and custom encoded
objects will be checked for circular references during encoding to
prevent an infinite recursion (which would cause an OverflowError).
Otherwise, no such check takes place.
If allow_nan is True, then NaN, Infinity, and -Infinity will be
encoded as such. This behavior is not JSON specification compliant,
but is consistent with most JavaScript based encoders and decoders.
Otherwise, it will be a ValueError to encode such floats.
If sort_keys is True, then the output of dictionaries will be
sorted by key; this is useful for regression tests to ensure
that JSON serializations can be compared on a day-to-day basis.
If ``indent`` is a non-negative integer, then JSON array
elements and object members will be pretty-printed with that
indent level. An indent level of 0 will only insert newlines.
``None`` is the most compact representation.
"""
self.skipkeys = skipkeys
self.ensure_ascii = ensure_ascii
self.check_circular = check_circular
self.allow_nan = allow_nan
self.sort_keys = sort_keys
self.indent = indent
self.current_indent_level = 0
def _newline_indent(self):
if self.indent is None:
return ''
return '\n' + (' ' * (self.indent * self.current_indent_level))
def _iterencode_list(self, lst, markers=None):
if not lst:
yield '[]'
return
if markers is not None:
markerid = id(lst)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = lst
self.current_indent_level += 1
newline_indent = self._newline_indent()
yield '[' + newline_indent
first = True
for value in lst:
if first:
first = False
else:
yield ', ' + newline_indent
for chunk in self._iterencode(value, markers):
yield chunk
self.current_indent_level -= 1
yield self._newline_indent() + ']'
if markers is not None:
del markers[markerid]
def _iterencode_dict(self, dct, markers=None):
if not dct:
yield '{}'
return
if markers is not None:
markerid = id(dct)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = dct
self.current_indent_level += 1
newline_indent = self._newline_indent()
yield '{' + newline_indent
first = True
if self.ensure_ascii:
encoder = encode_basestring_ascii
else:
encoder = encode_basestring
allow_nan = self.allow_nan
if self.sort_keys:
keys = dct.keys()
keys.sort()
items = [(k,dct[k]) for k in keys]
else:
items = dct.iteritems()
for key, value in items:
if isinstance(key, basestring):
pass
# JavaScript is weakly typed for these, so it makes sense to
# also allow them. Many encoders seem to do something like this.
elif isinstance(key, float):
key = floatstr(key, allow_nan)
elif isinstance(key, (int, long)):
key = str(key)
elif key is True:
key = 'true'
elif key is False:
key = 'false'
elif key is None:
key = 'null'
elif self.skipkeys:
continue
else:
raise TypeError("key %r is not a string" % (key,))
if first:
first = False
else:
yield ', ' + newline_indent
yield encoder(key)
yield ': '
for chunk in self._iterencode(value, markers):
yield chunk
self.current_indent_level -= 1
yield self._newline_indent() + '}'
if markers is not None:
del markers[markerid]
def _iterencode(self, o, markers=None):
if isinstance(o, basestring):
if self.ensure_ascii:
encoder = encode_basestring_ascii
else:
encoder = encode_basestring
yield encoder(o)
elif o is None:
yield 'null'
elif o is True:
yield 'true'
elif o is False:
yield 'false'
elif isinstance(o, (int, long)):
yield str(o)
elif isinstance(o, float):
yield floatstr(o, self.allow_nan)
elif isinstance(o, (list, tuple)):
for chunk in self._iterencode_list(o, markers):
yield chunk
elif isinstance(o, dict):
for chunk in self._iterencode_dict(o, markers):
yield chunk
else:
if markers is not None:
markerid = id(o)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = o
for chunk in self._iterencode_default(o, markers):
yield chunk
if markers is not None:
del markers[markerid]
def _iterencode_default(self, o, markers=None):
newobj = self.default(o)
return self._iterencode(newobj, markers)
def default(self, o):
"""
Implement this method in a subclass such that it returns
a serializable object for ``o``, or calls the base implementation
(to raise a ``TypeError``).
For example, to support arbitrary iterators, you could
implement default like this::
def default(self, o):
try:
iterable = iter(o)
except TypeError:
pass
else:
return list(iterable)
return JSONEncoder.default(self, o)
"""
raise TypeError("%r is not JSON serializable" % (o,))
def encode(self, o):
"""
Return a JSON string representation of a Python data structure.
>>> JSONEncoder().encode({"foo": ["bar", "baz"]})
'{"foo":["bar", "baz"]}'
"""
# This doesn't pass the iterator directly to ''.join() because it
# sucks at reporting exceptions. It's going to do this internally
# anyway because it uses PySequence_Fast or similar.
chunks = list(self.iterencode(o))
return ''.join(chunks)
def iterencode(self, o):
"""
Encode the given object and yield each string
representation as available.
For example::
for chunk in JSONEncoder().iterencode(bigobject):
mysocket.write(chunk)
"""
if self.check_circular:
markers = {}
else:
markers = None
return self._iterencode(o, markers)
__all__ = ['JSONEncoder']
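# An illustrative sketch (not part of the original module) of extending
# JSONEncoder through ``default()`` as its docstring describes; ``SetEncoder``
# is an assumed name used only for this example.
class SetEncoder(JSONEncoder):
    """Encode ``set`` objects as JSON arrays and defer everything else."""
    def default(self, o):
        if isinstance(o, set):
            return list(o)
        return JSONEncoder.default(self, o)
# Example: SetEncoder(sort_keys=True).encode({'tags': set(['a'])})
# yields '{"tags": ["a"]}'.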
|
mbatchkarov/dc_evaluation
|
refs/heads/master
|
tests/test_kmean_disco_vectorizer.py
|
1
|
import logging
import pytest
import numpy as np
import pandas as pd
from eval.pipeline.feature_extractors import FeatureExtractor
from eval.scripts.kmeans_disco import cluster_vectors
from eval.pipeline.multivectors import KmeansVectorizer
logging.basicConfig(level=logging.INFO,
format="%(asctime)s\t%(module)s.%(funcName)s (line %(lineno)d)\t%(levelname)s : %(message)s")
@pytest.fixture(scope='module')
def corpus():
# because A and E belong to the same cluster, seeing A in a document
    # is equivalent to seeing E, and vice versa. The same goes for B and F.
# Try a few combinations of these "words" in a document, they should all
# be equivalent
return [
['a/N', 'b/V', 'not_in_vocabulary'],
['e/N', 'b/V'],
['a/N', 'f/V'],
['e/N', 'f/V'],
# ------------
['c/J', 'd/N'],
['g/J', 'd/N'],
['c/J', 'h/N'],
['g/J', 'h/N'],
]
@pytest.fixture(scope='module')
def corpus_small():
# some clusters (third and fourth) are not present in the corpus
return [['a/N', 'b/V']]
@pytest.fixture
def clusters(tmpdir):
vector_path = 'tests/resources/twos.vectors.txt'
put_path = str(tmpdir.join('clusters_unit_test.hdf'))
cluster_vectors(vector_path, put_path, n_clusters=4, n_jobs=1)
clusters = pd.read_hdf(put_path, key='clusters').clusters
for i in range(4):
# a and e, b and f, etc, belong to the same cluster
assert clusters[i] == clusters[i + 4]
return put_path
def _vectorize(clusters, corpus):
feature_types = {'extract_unigram_features': set('JVN'), 'extract_phrase_features': []}
v = KmeansVectorizer(min_df=0,
train_time_opts=feature_types,
decode_time_opts=feature_types)
feature_extractor = FeatureExtractor().update(**feature_types)
X, _ = v.fit_transform(corpus, clusters=pd.read_hdf(clusters, key='clusters'),
train_time_extractor=feature_extractor, decode_time_extractor=feature_extractor)
return X, v
def test_kmeans_vectorizer(corpus, corpus_small, clusters):
X, vect = _vectorize(clusters, corpus)
assert X.shape == (8, 4)
print(X.A)
assert list(X.sum(axis=1).A1) == [2] * 8 # num feats in each document
# first four document's vectors are all equal to each other, and so are the last four
for i in range(3):
np.testing.assert_array_equal(X.A[i, :], X.A[i + 1, :])
np.testing.assert_array_equal(X.A[i + 4, :], X.A[i + 5, :])
X, _ = vect.transform(corpus_small)
print(X.A)
assert X.shape == (1, 4)
def test_kmeans_vectorizer_missing_clusters(corpus_small, clusters):
# when a cluster is missing from the labelled corpus, it should not be added to the vocabulary
# this will cause problems later
X, _ = _vectorize(clusters, corpus_small)
print(X.A)
assert X.shape == (1, 2)
|
stannynuytkens/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/mofosex.py
|
14
|
from __future__ import unicode_literals
from ..utils import (
int_or_none,
str_to_int,
unified_strdate,
)
from .keezmovies import KeezMoviesIE
class MofosexIE(KeezMoviesIE):
_VALID_URL = r'https?://(?:www\.)?mofosex\.com/videos/(?P<id>\d+)/(?P<display_id>[^/?#&.]+)\.html'
_TESTS = [{
'url': 'http://www.mofosex.com/videos/318131/amateur-teen-playing-and-masturbating-318131.html',
'md5': '558fcdafbb63a87c019218d6e49daf8a',
'info_dict': {
'id': '318131',
'display_id': 'amateur-teen-playing-and-masturbating-318131',
'ext': 'mp4',
'title': 'amateur teen playing and masturbating',
'thumbnail': r're:^https?://.*\.jpg$',
'upload_date': '20121114',
'view_count': int,
'like_count': int,
'dislike_count': int,
'age_limit': 18,
}
}, {
# This video is no longer available
'url': 'http://www.mofosex.com/videos/5018/japanese-teen-music-video.html',
'only_matching': True,
}]
def _real_extract(self, url):
webpage, info = self._extract_info(url)
view_count = str_to_int(self._search_regex(
r'VIEWS:</span>\s*([\d,.]+)', webpage, 'view count', fatal=False))
like_count = int_or_none(self._search_regex(
r'id=["\']amountLikes["\'][^>]*>(\d+)', webpage,
'like count', fatal=False))
dislike_count = int_or_none(self._search_regex(
r'id=["\']amountDislikes["\'][^>]*>(\d+)', webpage,
            'dislike count', fatal=False))
upload_date = unified_strdate(self._html_search_regex(
r'Added:</span>([^<]+)', webpage, 'upload date', fatal=False))
info.update({
'view_count': view_count,
'like_count': like_count,
'dislike_count': dislike_count,
'upload_date': upload_date,
'thumbnail': self._og_search_thumbnail(webpage),
})
return info
|
joelddiaz/openshift-tools
|
refs/heads/prod
|
openshift/installer/vendored/openshift-ansible-3.9.14-1/roles/lib_openshift/library/oc_group.py
|
7
|
#!/usr/bin/env python
# pylint: disable=missing-docstring
# flake8: noqa: T001
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
#
# Copyright 2016 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import copy
import fcntl
import json
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
try:
import ruamel.yaml as yaml
except ImportError:
import yaml
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/group -*- -*- -*-
DOCUMENTATION = '''
---
module: oc_group
short_description: Modify and idempotently manage openshift groups.
description:
- Modify openshift groups programmatically.
options:
state:
description:
- Supported states, present, absent, list
- present - will ensure object is created or updated to the value specified
- list - will return a group
- absent - will remove the group
required: False
default: present
choices: ["present", 'absent', 'list']
aliases: []
kubeconfig:
description:
- The path for the kubeconfig file to use for authentication
required: false
default: /etc/origin/master/admin.kubeconfig
aliases: []
debug:
description:
- Turn on debug output.
required: false
default: False
aliases: []
name:
description:
- Name of the object that is being queried.
required: false
default: None
aliases: []
namespace:
description:
- The namespace where the object lives.
required: false
default: str
aliases: []
author:
- "Joel Diaz <jdiaz@redhat.com>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
- name: create group
oc_group:
state: present
name: acme_org
register: group_out
'''
# -*- -*- -*- End included fragment: doc/group -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
def __init__(self,
filename=None,
content=None,
content_type='yaml',
separator='.',
backup=False):
self.content = content
self._separator = separator
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
self.backup = backup
self.load(content_type=self.content_type)
if self.__yaml_dict is None:
self.__yaml_dict = {}
@property
def separator(self):
''' getter method for separator '''
return self._separator
@separator.setter
def separator(self, inc_sep):
''' setter method for separator '''
self._separator = inc_sep
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def remove_entry(data, key, index=None, value=None, sep='.'):
''' remove data at location key '''
if key == '' and isinstance(data, dict):
if value is not None:
data.pop(value)
elif index is not None:
raise YeditException("remove_entry for a dictionary does not have an index {}".format(index))
else:
data.clear()
return True
elif key == '' and isinstance(data, list):
ind = None
if value is not None:
try:
ind = data.index(value)
except ValueError:
return False
elif index is not None:
ind = index
else:
del data[:]
if ind is not None:
data.pop(ind)
return True
if not (key and Yedit.valid_key(key, sep)) and \
isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
@staticmethod
def add_entry(data, key, item=None, sep='.'):
        ''' Add an item to a dictionary using key notation a.b.c
           d = {'a': {'b': 'c'}}
           key = a#b, sep = '#'
           item = 'x'  ->  d = {'a': {'b': 'x'}}
        '''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
data = data[dict_key]
continue
elif data and not isinstance(data, dict):
raise YeditException("Unexpected item type found while going through key " +
"path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
# process last index for add
# expected list entry
elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
# didn't add/update to an existing list, nor add/update key to a dict
# so we must have been provided some syntax like a.b.c[<int>] = "data" for a
# non-existent array
else:
raise YeditException("Error adding to object at path: {}".format(key))
return data
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
           d = {'a': {'b': 'c'}}
key = a.b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
return data
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
tmp_filename = filename + '.yedit'
with open(tmp_filename, 'w') as yfd:
fcntl.flock(yfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
yfd.write(contents)
fcntl.flock(yfd, fcntl.LOCK_UN)
os.rename(tmp_filename, filename)
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripDumper if supported.
if self.content_type == 'yaml':
try:
Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
except AttributeError:
Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
elif self.content_type == 'json':
Yedit._write(self.filename, json.dumps(self.yaml_dict, indent=4, sort_keys=True))
else:
raise YeditException('Unsupported content_type: {}.'.format(self.content_type) +
'Please specify a content_type of yaml or json.')
return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename is None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents and not self.content:
return None
if self.content:
if isinstance(self.content, dict):
self.yaml_dict = self.content
return self.yaml_dict
elif isinstance(self.content, str):
contents = self.content
# check if it is yaml
try:
if content_type == 'yaml' and contents:
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripLoader if supported.
try:
self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
except AttributeError:
self.yaml_dict = yaml.safe_load(contents)
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError:
entry = None
return entry
def pop(self, path, key_or_item):
''' remove a key, value pair from a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path, index=None, value=None):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, index, value, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def exists(self, path, value):
''' check if value exists at path'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, list):
if value in entry:
return True
return False
elif isinstance(entry, dict):
if isinstance(value, dict):
rval = False
for key, val in value.items():
if entry[key] != val:
rval = False
break
else:
rval = True
return rval
return value in entry
return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# AUDIT:maybe-no-member makes sense due to loading data from
# a serialized format.
# pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
if curr_value:
try:
ind = entry.index(curr_value)
except ValueError:
return (False, self.yaml_dict)
elif index is not None:
ind = index
if ind is not None and entry[ind] != value:
entry[ind] = value
return (True, self.yaml_dict)
# see if it exists in the list
try:
ind = entry.index(value)
except ValueError:
# doesn't exist, append it
entry.append(value)
return (True, self.yaml_dict)
# already exists, return
if ind is not None:
return (False, self.yaml_dict)
return (False, self.yaml_dict)
def put(self, path, value):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry == value:
return (False, self.yaml_dict)
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is None:
return (False, self.yaml_dict)
# When path equals "" it is a special case.
# "" refers to the root of the document
# Only update the root path (entire document) when its a list or dict
if path == '':
if isinstance(result, list) or isinstance(result, dict):
self.yaml_dict = result
return (True, self.yaml_dict)
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
def create(self, path, value):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
return (False, self.yaml_dict)
@staticmethod
def get_curr_value(invalue, val_type):
'''return the current value'''
if invalue is None:
return None
curr_value = invalue
if val_type == 'yaml':
try:
# AUDIT:maybe-no-member makes sense due to different yaml libraries
# pylint: disable=maybe-no-member
curr_value = yaml.safe_load(invalue, Loader=yaml.RoundTripLoader)
except AttributeError:
curr_value = yaml.safe_load(invalue)
elif val_type == 'json':
curr_value = json.loads(invalue)
return curr_value
@staticmethod
def parse_value(inc_value, vtype=''):
'''determine value type passed'''
true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
'on', 'On', 'ON', ]
false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
'off', 'Off', 'OFF']
# It came in as a string but you didn't specify value_type as string
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
# There is a special case where '' will turn into None after yaml loading it so skip
if isinstance(inc_value, str) and inc_value == '':
pass
# If vtype is not str then go ahead and attempt to yaml load it.
elif isinstance(inc_value, str) and 'str' not in vtype:
try:
inc_value = yaml.safe_load(inc_value)
except Exception:
raise YeditException('Could not determine type of incoming value. ' +
'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
@staticmethod
def process_edits(edits, yamlfile):
'''run through a list of edits and process them one-by-one'''
results = []
for edit in edits:
value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
if edit.get('action') == 'update':
# pylint: disable=line-too-long
curr_value = Yedit.get_curr_value(
Yedit.parse_value(edit.get('curr_value')),
edit.get('curr_value_format'))
rval = yamlfile.update(edit['key'],
value,
edit.get('index'),
curr_value)
elif edit.get('action') == 'append':
rval = yamlfile.append(edit['key'], value)
else:
rval = yamlfile.put(edit['key'], value)
if rval[0]:
results.append({'key': edit['key'], 'edit': rval[1]})
return {'changed': len(results) > 0, 'results': results}
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(params):
'''perform the idempotent crud operations'''
yamlfile = Yedit(filename=params['src'],
backup=params['backup'],
content_type=params['content_type'],
separator=params['separator'])
state = params['state']
if params['src']:
rval = yamlfile.load()
if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
                           'file exists, that it has correct permissions, and is valid yaml.'}
if state == 'list':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['key']:
rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
elif state == 'absent':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['update']:
rval = yamlfile.pop(params['key'], params['value'])
else:
rval = yamlfile.delete(params['key'], params['index'], params['value'])
if rval[0] and params['src']:
yamlfile.write()
return {'changed': rval[0], 'result': rval[1], 'state': state}
elif state == 'present':
# check if content is different than what is in the file
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
params['value'] is None:
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
# If we were passed a key, value then
            # we encapsulate it in a list and process it
# Key, Value passed to the module : Converted to Edits list #
edits = []
_edit = {}
if params['value'] is not None:
_edit['value'] = params['value']
_edit['value_type'] = params['value_type']
_edit['key'] = params['key']
if params['update']:
_edit['action'] = 'update'
_edit['curr_value'] = params['curr_value']
_edit['curr_value_format'] = params['curr_value_format']
_edit['index'] = params['index']
elif params['append']:
_edit['action'] = 'append'
edits.append(_edit)
elif params['edits'] is not None:
edits = params['edits']
if edits:
results = Yedit.process_edits(edits, yamlfile)
# if there were changes and a src provided to us we need to write
if results['changed'] and params['src']:
yamlfile.write()
return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
'state': state}
# We were passed content but no src, key or value, or edits. Return contents in memory
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
        return {'failed': True, 'msg': 'Unknown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
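# Illustrative sketch (not part of the upstream module): a minimal example of
# driving Yedit in memory with dotted-key notation. The document contents and
# key names below are assumptions chosen only for demonstration.
def _yedit_usage_example():
    '''Show put/get/delete on an in-memory document.'''
    doc = Yedit(content={'metadata': {'name': 'demo'}}, separator='.')
    doc.put('metadata.labels.app', 'web')    # intermediate keys are created on demand
    assert doc.get('metadata.labels.app') == 'web'
    doc.delete('metadata.labels')            # remove the whole labels mapping
    assert doc.get('metadata.labels') is None
    return doc.yaml_dict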
# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001
class OpenShiftCLIError(Exception):
'''Exception class for openshiftcli'''
pass
ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
def locate_oc_binary():
''' Find and return oc binary file '''
# https://github.com/openshift/openshift-ansible/issues/3410
# oc can be in /usr/local/bin in some cases, but that may not
# be in $PATH due to ansible/sudo
paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
oc_binary = 'oc'
# Use shutil.which if it is available, otherwise fallback to a naive path search
try:
which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
if which_result is not None:
oc_binary = which_result
except AttributeError:
for path in paths:
if os.path.exists(os.path.join(path, oc_binary)):
oc_binary = os.path.join(path, oc_binary)
break
return oc_binary
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
all_namespaces=False):
''' Constructor for OpenshiftCLI '''
self.namespace = namespace
self.verbose = verbose
self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
self.all_namespaces = all_namespaces
self.oc_binary = locate_oc_binary()
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
def _replace_content(self, resource, rname, content, edits=None, force=False, sep='.'):
''' replace the current object with the content '''
res = self._get(resource, rname)
if not res['results']:
return res
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, res['results'][0], separator=sep)
updated = False
if content is not None:
changes = []
for key, value in content.items():
changes.append(yed.put(key, value))
if any([change[0] for change in changes]):
updated = True
elif edits is not None:
results = Yedit.process_edits(edits, yed)
if results['changed']:
updated = True
if updated:
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._replace(fname, force)
return {'returncode': 0, 'updated': False}
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
# We are removing the 'resourceVersion' to handle
# a race condition when modifying oc objects
yed = Yedit(fname)
results = yed.delete('metadata.resourceVersion')
if results[0]:
yed.write()
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
return self.openshift_cmd(cmd)
def _create_from_content(self, rname, content):
'''create a temporary file and then call oc create on it'''
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, content=content)
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._create(fname)
def _create(self, fname):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
cmd = ['delete', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
elif name is not None:
cmd.append(name)
else:
raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
'''process a template
template_name: the name of the template to process
create: whether to send to oc create after processing
params: the parameters for the template
template_data: the incoming template's data; instead of a file
'''
cmd = ['process']
if template_data:
cmd.extend(['-f', '-'])
else:
cmd.append(template_name)
if params:
param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
results = self.openshift_cmd(cmd, output=True, input_data=template_data)
if results['returncode'] != 0 or not create:
return results
fname = Utils.create_tmpfile(template_name + '-')
yed = Yedit(fname, results['results'])
yed.write()
atexit.register(Utils.cleanup, [fname])
return self.openshift_cmd(['create', '-f', fname])
def _get(self, resource, name=None, selector=None, field_selector=None):
'''return a resource by name '''
cmd = ['get', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
if field_selector is not None:
cmd.append('--field-selector={}'.format(field_selector))
# Name cannot be used with selector or field_selector.
if selector is None and field_selector is None and name is not None:
cmd.append(name)
cmd.extend(['-o', 'json'])
rval = self.openshift_cmd(cmd, output=True)
        # Ensure results are returned in an array
if 'items' in rval:
rval['results'] = rval['items']
elif not isinstance(rval['results'], list):
rval['results'] = [rval['results']]
return rval
def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node schedulable '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
def _list_pods(self, node=None, selector=None, pod_selector=None):
''' perform oadm list pods
node: the node in which to list pods
selector: the label selector filter if provided
pod_selector: the pod selector filter if provided
'''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
# pylint: disable=too-many-arguments
def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
''' perform oadm manage-node evacuate '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
cmd.append('--evacuate')
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _version(self):
''' return the openshift version'''
return self.openshift_cmd(['version'], output=True, output_type='raw')
def _import_image(self, url=None, name=None, tag=None):
''' perform image import '''
cmd = ['import-image']
image = '{0}'.format(name)
if tag:
image += ':{0}'.format(tag)
cmd.append(image)
if url:
cmd.append('--from={0}/{1}'.format(url, image))
cmd.append('-n{0}'.format(self.namespace))
cmd.append('--confirm')
return self.openshift_cmd(cmd)
def _run(self, cmds, input_data):
''' Actually executes the command. This makes mocking easier. '''
curr_env = os.environ.copy()
curr_env.update({'KUBECONFIG': self.kubeconfig})
proc = subprocess.Popen(cmds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=curr_env)
stdout, stderr = proc.communicate(input_data)
return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
cmds = [self.oc_binary]
if oadm:
cmds.append('adm')
cmds.extend(cmd)
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']:  # E501
cmds.extend(['-n', self.namespace])
if self.verbose:
print(' '.join(cmds))
try:
returncode, stdout, stderr = self._run(cmds, input_data)
except OSError as ex:
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
"cmd": ' '.join(cmds)}
if output_type == 'json':
rval['results'] = {}
if output and stdout:
try:
rval['results'] = json.loads(stdout)
except ValueError as verr:
if "No JSON object could be decoded" in verr.args:
rval['err'] = verr.args
elif output_type == 'raw':
rval['results'] = stdout if output else ''
if self.verbose:
print("STDOUT: {0}".format(stdout))
print("STDERR: {0}".format(stderr))
if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
"stdout": stdout})
return rval
class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
''' create a file in tmp with name and contents'''
tmp = Utils.create_tmpfile(prefix=rname)
if ftype == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripDumper'):
Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
else:
Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
elif ftype == 'json':
Utils._write(tmp, json.dumps(data))
else:
Utils._write(tmp, data)
# Register cleanup when module is done
atexit.register(Utils.cleanup, [tmp])
return tmp
@staticmethod
def create_tmpfile_copy(inc_file):
'''create a temporary copy of a file'''
tmpfile = Utils.create_tmpfile('lib_openshift-')
Utils._write(tmpfile, open(inc_file).read())
# Cleanup the tmpfile
atexit.register(Utils.cleanup, [tmpfile])
return tmpfile
@staticmethod
def create_tmpfile(prefix='tmp'):
''' Generates and returns a temporary file name '''
with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
return tmp.name
@staticmethod
def create_tmp_files_from_contents(content, content_type=None):
'''Turn an array of dict: filename, content into a files array'''
if not isinstance(content, list):
content = [content]
files = []
for item in content:
path = Utils.create_tmp_file_from_contents(item['path'] + '-',
item['data'],
ftype=content_type)
files.append({'name': os.path.basename(item['path']),
'path': path})
return files
@staticmethod
def cleanup(files):
'''Clean up on exit '''
for sfile in files:
if os.path.exists(sfile):
if os.path.isdir(sfile):
shutil.rmtree(sfile)
elif os.path.isfile(sfile):
os.remove(sfile)
@staticmethod
def exists(results, _name):
''' Check to see if the results include the name '''
if not results:
return False
if Utils.find_result(results, _name):
return True
return False
@staticmethod
def find_result(results, _name):
''' Find the specified result by name'''
rval = None
for result in results:
if 'metadata' in result and result['metadata']['name'] == _name:
rval = result
break
return rval
@staticmethod
def get_resource_file(sfile, sfile_type='yaml'):
''' return the service file '''
contents = None
with open(sfile) as sfd:
contents = sfd.read()
if sfile_type == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripLoader'):
contents = yaml.load(contents, yaml.RoundTripLoader)
else:
contents = yaml.safe_load(contents)
elif sfile_type == 'json':
contents = json.loads(contents)
return contents
@staticmethod
def filter_versions(stdout):
''' filter the oc version output '''
version_dict = {}
version_search = ['oc', 'openshift', 'kubernetes']
for line in stdout.strip().split('\n'):
for term in version_search:
if not line:
continue
if line.startswith(term):
version_dict[term] = line.split()[-1]
# horrible hack to get openshift version in Openshift 3.2
# By default "oc version in 3.2 does not return an "openshift" version
if "openshift" not in version_dict:
version_dict["openshift"] = version_dict["oc"]
return version_dict
@staticmethod
def add_custom_versions(versions):
''' create custom versions strings '''
versions_dict = {}
for tech, version in versions.items():
# clean up "-" from version
if "-" in version:
version = version.split("-")[0]
if version.startswith('v'):
versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
# "v3.3.0.33" is what we have, we want "3.3"
versions_dict[tech + '_short'] = version[1:4]
return versions_dict
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
import rpm
transaction_set = rpm.TransactionSet()
rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
return rpmquery.count() > 0
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
@staticmethod
def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
''' Given a user defined definition, compare it with the results given back by our query. '''
# Currently these values are autogenerated and we do not need to check them
skip = ['metadata', 'status']
if skip_keys:
skip.extend(skip_keys)
for key, value in result_def.items():
if key in skip:
continue
# Both are lists
if isinstance(value, list):
if key not in user_def:
if debug:
print('User data does not have key [%s]' % key)
print('User data: %s' % user_def)
return False
if not isinstance(user_def[key], list):
if debug:
print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
return False
if len(user_def[key]) != len(value):
if debug:
print("List lengths are not equal.")
print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
print("user_def: %s" % user_def[key])
print("value: %s" % value)
return False
for values in zip(user_def[key], value):
if isinstance(values[0], dict) and isinstance(values[1], dict):
if debug:
print('sending list - list')
print(type(values[0]))
print(type(values[1]))
result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
if not result:
print('list compare returned false')
return False
elif value != user_def[key]:
if debug:
print('value should be identical')
print(user_def[key])
print(value)
return False
# recurse on a dictionary
elif isinstance(value, dict):
if key not in user_def:
if debug:
print("user_def does not have key [%s]" % key)
return False
if not isinstance(user_def[key], dict):
if debug:
print("dict returned false: not instance of dict")
return False
# before passing ensure keys match
api_values = set(value.keys()) - set(skip)
user_values = set(user_def[key].keys()) - set(skip)
if api_values != user_values:
if debug:
print("keys are not equal in dict")
print(user_values)
print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
if not result:
if debug:
print("dict returned false")
print(result)
return False
# Verify each key, value pair is the same
else:
if key not in user_def or value != user_def[key]:
if debug:
print("value not equal; user_def does not have key")
print(key)
print(value)
if key in user_def:
print(user_def[key])
return False
if debug:
print('returning true')
return True
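# Illustrative sketch (comments only, not upstream code): keys listed in `skip`
# ('metadata', 'status' plus any skip_keys) are ignored, so
#   user_def   = {'kind': 'Group', 'users': ['alice']}
#   result_def = {'kind': 'Group', 'users': ['alice'], 'metadata': {'name': 'g1'}}
#   Utils.check_def_equal(user_def, result_def)   # -> True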
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
self.kubeconfig = kubeconfig
self.name = rname
self.namespace = namespace
self._options = options
@property
def config_options(self):
''' return config options '''
return self._options
def to_option_list(self, ascommalist=''):
'''return all options as a string
if ascommalist is set to the name of a key, and
the value of that key is a dict, format the dict
as a list of comma delimited key=value pairs'''
return self.stringify(ascommalist)
def stringify(self, ascommalist=''):
''' return the options hash as cli params in a string
if ascommalist is set to the name of a key, and
the value of that key is a dict, format the dict
as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
val = data['value']
rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
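# Illustrative sketch (comments only, option names are assumptions): for a
# config whose options include
#   {'node_selector': {'value': {'region': 'infra'}, 'include': True},
#    'replicas':      {'value': 2,                   'include': True}}
# stringify(ascommalist='node_selector') yields
#   ['--node-selector=region=infra', '--replicas=2']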
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/group.py -*- -*- -*-
class GroupConfig(object):
''' Handle group options '''
# pylint: disable=too-many-arguments
def __init__(self,
sname,
namespace,
kubeconfig):
''' constructor for handling group options '''
self.kubeconfig = kubeconfig
self.name = sname
self.namespace = namespace
self.data = {}
self.create_dict()
def create_dict(self):
''' return a group definition as a dict '''
self.data['apiVersion'] = 'v1'
self.data['kind'] = 'Group'
self.data['metadata'] = {}
self.data['metadata']['name'] = self.name
self.data['users'] = None
# pylint: disable=too-many-instance-attributes
class Group(Yedit):
''' Class to model an openshift group definition '''
kind = 'group'
def __init__(self, content):
'''Group constructor'''
super(Group, self).__init__(content=content)
# -*- -*- -*- End included fragment: lib/group.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/oc_group.py -*- -*- -*-
class OCGroup(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
kind = 'group'
def __init__(self,
config,
verbose=False):
''' Constructor for OCGroup '''
super(OCGroup, self).__init__(config.namespace, config.kubeconfig)
self.config = config
self.namespace = config.namespace
self._group = None
@property
def group(self):
''' property function for the group '''
if not self._group:
self.get()
return self._group
@group.setter
def group(self, data):
''' setter function for yedit var '''
self._group = data
def exists(self):
''' return whether a group exists '''
if self.group:
return True
return False
def get(self):
'''return group information '''
result = self._get(self.kind, self.config.name)
if result['returncode'] == 0:
self.group = Group(content=result['results'][0])
elif 'groups \"{}\" not found'.format(self.config.name) in result['stderr']:
result['returncode'] = 0
result['results'] = [{}]
return result
def delete(self):
'''delete the object'''
return self._delete(self.kind, self.config.name)
def create(self):
'''create the object'''
return self._create_from_content(self.config.name, self.config.data)
def update(self):
'''update the object'''
return self._replace_content(self.kind, self.config.name, self.config.data)
def needs_update(self):
''' verify an update is needed '''
return not Utils.check_def_equal(self.config.data, self.group.yaml_dict, skip_keys=['users'], debug=True)
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(params, check_mode=False):
'''run the idempotent ansible code'''
gconfig = GroupConfig(params['name'],
params['namespace'],
params['kubeconfig'],
)
oc_group = OCGroup(gconfig, verbose=params['debug'])
state = params['state']
api_rval = oc_group.get()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
#####
# Get
#####
if state == 'list':
return {'changed': False, 'results': api_rval['results'], 'state': state}
########
# Delete
########
if state == 'absent':
if oc_group.exists():
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete.'}
api_rval = oc_group.delete()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
return {'changed': False, 'state': state}
if state == 'present':
########
# Create
########
if not oc_group.exists():
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'}
# Create it here
api_rval = oc_group.create()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
# return the created object
api_rval = oc_group.get()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
########
# Update
########
if oc_group.needs_update():
api_rval = oc_group.update()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
# return the created object
api_rval = oc_group.get()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
return {'changed': False, 'results': api_rval, 'state': state}
return {'failed': True, 'msg': 'Unknown state passed. {}'.format(state)}
# -*- -*- -*- End included fragment: class/oc_group.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: ansible/oc_group.py -*- -*- -*-
#pylint: disable=too-many-branches
def main():
'''
ansible oc module for group
'''
module = AnsibleModule(
argument_spec=dict(
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
state=dict(default='present', type='str',
choices=['present', 'absent', 'list']),
debug=dict(default=False, type='bool'),
name=dict(default=None, type='str'),
namespace=dict(default='default', type='str'),
# adding users to a group is handled through the oc_users module
#users=dict(default=None, type='list'),
),
supports_check_mode=True,
)
rval = OCGroup.run_ansible(module.params, module.check_mode)
if 'failed' in rval:
return module.fail_json(**rval)
return module.exit_json(**rval)
if __name__ == '__main__':
main()
# -*- -*- -*- End included fragment: ansible/oc_group.py -*- -*- -*-
|
tchernomax/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/azure/azure_rm_containerinstance.py
|
33
|
#!/usr/bin/python
#
# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_containerinstance
version_added: "2.5"
short_description: Manage an Azure Container Instance.
description:
- Create, update and delete an Azure Container Instance.
options:
resource_group:
description:
- Name of resource group.
required: true
name:
description:
- The name of the container group.
required: true
os_type:
description:
- The OS type of containers.
choices:
- linux
- windows
default: linux
state:
description:
- Assert the state of the container instance. Use 'present' to create or update a container instance and 'absent' to delete it.
default: present
choices:
- absent
- present
ip_address:
description:
- The IP address type of the container group (default is 'none')
choices:
- public
- none
default: 'none'
ports:
description:
- List of ports exposed within the container group.
location:
description:
- Valid azure location. Defaults to location of the resource group.
registry_login_server:
description:
- The container image registry login server.
registry_username:
description:
- The username used to log in to the container image registry server.
registry_password:
description:
- The password used to log in to the container image registry server.
containers:
description:
- List of containers.
suboptions:
name:
description:
- The name of the container instance.
required: true
image:
description:
- The container image name.
required: true
memory:
description:
- The required memory of the containers in GB.
default: 1.5
cpu:
description:
- The required number of CPU cores of the containers.
default: 1
ports:
description:
- List of ports exposed within the container group.
force_update:
description:
- Force update of existing container instance. Any update will result in deletion and recreation of existing containers.
type: bool
default: 'no'
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Zim Kalinowski (@zikalino)"
'''
EXAMPLES = '''
- name: Create sample container group
azure_rm_containerinstance:
resource_group: testrg
name: mynewcontainergroup
os_type: linux
ip_address: public
ports:
- 80
- 81
containers:
- name: mycontainer1
image: httpd
memory: 1.5
ports:
- 80
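# Illustrative second example (the registry server, credentials and image
# below are placeholders, not values from the original documentation):
- name: Create container group pulling from a private registry
  azure_rm_containerinstance:
    resource_group: testrg
    name: myprivatecontainergroup
    registry_login_server: myregistry.azurecr.io
    registry_username: myusername
    registry_password: mypassword
    containers:
      - name: mycontainer1
        image: myregistry.azurecr.io/httpd
        memory: 1.5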
'''
RETURN = '''
id:
description:
- Resource ID
returned: always
type: str
sample: /subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/TestGroup/providers/Microsoft.ContainerInstance/containerGroups/aci1b6dd89
provisioning_state:
description:
- Provisioning state of the container.
returned: always
type: str
sample: Creating
ip_address:
description:
- Public IP Address of created container group.
returned: if address is public
type: str
sample: 175.12.233.11
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
from azure.mgmt.containerinstance import ContainerInstanceManagementClient
except ImportError:
# This is handled in azure_rm_common
pass
def create_container_dict_from_obj(container):
'''
Create a dict from an instance of a Container.
:param container: an instance of Container
:return: dict
'''
results = dict(
name=container.name,
image=container.image,
memory=container.resources.requests.memory_in_gb,
cpu=container.resources.requests.cpu
# command (list of str)
# ports (list of ContainerPort)
# environment_variables (list of EnvironmentVariable)
# resources (ResourceRequirements)
# volume mounts (list of VolumeMount)
)
if container.instance_view is not None:
# instance_view (ContainerPropertiesInstanceView)
results["instance_restart_count"] = container.instance_view.restart_count
if container.instance_view.current_state:
results["instance_current_state"] = container.instance_view.current_state.state
results["instance_current_start_time"] = container.instance_view.current_state.start_time
results["instance_current_exit_code"] = container.instance_view.current_state.exit_code
results["instance_current_finish_time"] = container.instance_view.current_state.finish_time
results["instance_current_detail_status"] = container.instance_view.current_state.detail_status
if container.instance_view.previous_state:
results["instance_previous_state"] = container.instance_view.previous_state.state
results["instance_previous_start_time"] = container.instance_view.previous_state.start_time
results["instance_previous_exit_code"] = container.instance_view.previous_state.exit_code
results["instance_previous_finish_time"] = container.instance_view.previous_state.finish_time
results["instance_previous_detail_status"] = container.instance_view.previous_state.detail_status
# events (list of ContainerEvent)
return results
class AzureRMContainerInstance(AzureRMModuleBase):
"""Configuration class for an Azure RM container instance resource"""
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
name=dict(
type='str',
required=True
),
os_type=dict(
type='str',
default='linux',
choices=['linux', 'windows']
),
state=dict(
type='str',
default='present',
choices=['present', 'absent']
),
location=dict(
type='str',
),
ip_address=dict(
type='str',
default='none',
choices=['public', 'none']
),
ports=dict(
type='list',
default=[]
),
registry_login_server=dict(
type='str',
default=None
),
registry_username=dict(
type='str',
default=None
),
registry_password=dict(
type='str',
default=None,
no_log=True
),
containers=dict(
type='list',
required=True
),
force_update=dict(
type='bool',
default=False
),
)
self.resource_group = None
self.name = None
self.location = None
self.state = None
self.ip_address = None
self.containers = None
self.tags = None
self.results = dict(changed=False, state=dict())
self.cgmodels = None
super(AzureRMContainerInstance, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True,
supports_tags=True)
def exec_module(self, **kwargs):
"""Main module execution method"""
for key in list(self.module_arg_spec.keys()) + ['tags']:
setattr(self, key, kwargs[key])
resource_group = None
response = None
results = dict()
# since this client hasn't been upgraded to expose models directly off the OperationClass, fish them out
self.cgmodels = self.containerinstance_client.container_groups.models
resource_group = self.get_resource_group(self.resource_group)
if not self.location:
self.location = resource_group.location
response = self.get_containerinstance()
if not response:
self.log("Container Group doesn't exist")
if self.state == 'absent':
self.log("Nothing to delete")
else:
self.force_update = True
else:
self.log("Container instance already exists")
if self.state == 'absent':
if not self.check_mode:
self.delete_containerinstance()
self.results['changed'] = True
self.log("Container instance deleted")
elif self.state == 'present':
self.log("Need to check if container group has to be deleted or may be updated")
update_tags, newtags = self.update_tags(response.get('tags', dict()))
if update_tags:
self.tags = newtags
if self.force_update:
self.log('Deleting container instance before update')
if not self.check_mode:
self.delete_containerinstance()
if self.state == 'present':
self.log("Need to Create / Update the container instance")
if self.force_update:
self.results['changed'] = True
if self.check_mode:
return self.results
response = self.create_update_containerinstance()
self.results['id'] = response['id']
self.results['provisioning_state'] = response['provisioning_state']
self.results['ip_address'] = response['ip_address']['ip']
self.log("Creation / Update done")
return self.results
def create_update_containerinstance(self):
'''
Creates or updates a container group with the specified configuration.
:return: deserialized container instance state dictionary
'''
self.log("Creating / Updating the container instance {0}".format(self.name))
registry_credentials = None
if self.registry_login_server is not None:
registry_credentials = [self.cgmodels.ImageRegistryCredential(server=self.registry_login_server,
username=self.registry_username,
password=self.registry_password)]
ip_address = None
if self.ip_address == 'public':
# get list of ports
if self.ports:
ports = []
for port in self.ports:
ports.append(self.cgmodels.Port(port=port, protocol="TCP"))
ip_address = self.cgmodels.IpAddress(ports=ports, ip=self.ip_address)
containers = []
for container_def in self.containers:
name = container_def.get("name")
image = container_def.get("image")
memory = container_def.get("memory", 1.5)
cpu = container_def.get("cpu", 1)
ports = []
port_list = container_def.get("ports")
if port_list:
for port in port_list:
ports.append(self.cgmodels.ContainerPort(port=port))
containers.append(self.cgmodels.Container(name=name,
image=image,
resources=self.cgmodels.ResourceRequirements(
requests=self.cgmodels.ResourceRequests(memory_in_gb=memory, cpu=cpu)
),
ports=ports))
parameters = self.cgmodels.ContainerGroup(location=self.location,
containers=containers,
image_registry_credentials=registry_credentials,
restart_policy=None,
ip_address=ip_address,
os_type=self.os_type,
volumes=None,
tags=self.tags)
response = self.containerinstance_client.container_groups.create_or_update(resource_group_name=self.resource_group,
container_group_name=self.name,
container_group=parameters)
if isinstance(response, AzureOperationPoller):
response = self.get_poller_result(response)
return response.as_dict()
def delete_containerinstance(self):
'''
Deletes the specified container group instance in the specified subscription and resource group.
:return: True
'''
self.log("Deleting the container instance {0}".format(self.name))
response = self.containerinstance_client.container_groups.delete(resource_group_name=self.resource_group, container_group_name=self.name)
return True
def get_containerinstance(self):
'''
Gets the properties of the specified container group.
:return: deserialized container instance state dictionary
'''
self.log("Checking if the container instance {0} is present".format(self.name))
found = False
try:
response = self.containerinstance_client.container_groups.get(resource_group_name=self.resource_group, container_group_name=self.name)
found = True
self.log("Response : {0}".format(response))
self.log("Container instance : {0} found".format(response.name))
except CloudError as e:
self.log('Did not find the container instance.')
if found is True:
return response.as_dict()
return False
def main():
"""Main execution"""
AzureRMContainerInstance()
if __name__ == '__main__':
main()
|
nekulin/arangodb
|
refs/heads/devel
|
3rdParty/V8-4.3.61/build/gyp/test/win/gyptest-link-subsystem.py
|
239
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure subsystem setting is extracted properly.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'linker-flags'
test.run_gyp('subsystem.gyp', chdir=CHDIR)
test.build('subsystem.gyp', 'test_console_ok', chdir=CHDIR)
test.build('subsystem.gyp', 'test_console_fail', chdir=CHDIR, status=1)
test.build('subsystem.gyp', 'test_windows_ok', chdir=CHDIR)
test.build('subsystem.gyp', 'test_windows_fail', chdir=CHDIR, status=1)
test.build('subsystem.gyp', 'test_console_xp', chdir=CHDIR)
test.build('subsystem.gyp', 'test_windows_xp', chdir=CHDIR)
# Make sure we are targeting XP.
def GetHeaders(exe):
return test.run_dumpbin('/headers', test.built_file_path(exe, chdir=CHDIR))
if '5.01 subsystem version' not in GetHeaders('test_console_xp.exe'):
test.fail_test()
if '5.01 subsystem version' not in GetHeaders('test_windows_xp.exe'):
test.fail_test()
# TODO(scottmg): There are other subsystems (WinCE, etc.) that we don't use.
test.pass_test()
|
razvanphp/arangodb
|
refs/heads/devel
|
3rdParty/V8-3.31.74.1/build/gyp/test/ninja/chained-dependency/gyptest-chained-dependency.py
|
246
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that files generated by two-steps-removed actions are built before
dependent compile steps.
"""
import os
import sys
import TestGyp
# This test is Ninja-specific in that:
# - the bug only showed nondeterministically in parallel builds;
# - it relies on a ninja-specific output file path.
test = TestGyp.TestGyp(formats=['ninja'])
test.run_gyp('chained-dependency.gyp')
objext = '.obj' if sys.platform == 'win32' else '.o'
test.build('chained-dependency.gyp',
os.path.join('obj', 'chained.chained' + objext))
# The test passes if the .o file builds successfully.
test.pass_test()
|
pshchelo/ironic
|
refs/heads/master
|
api-ref/source/conf.py
|
6
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# ironic documentation build configuration file, created by
# sphinx-quickstart on Sat May 1 15:17:47 2010.
#
# This file is execfile()d with the current directory set to
# its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import subprocess
import sys
import openstackdocstheme
html_theme = 'openstackdocs'
html_theme_path = [openstackdocstheme.get_html_theme_path()]
html_theme_options = {
"sidebar_mode": "toc",
}
extensions = [
'os_api_ref',
]
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('./'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Ironic API Reference'
copyright = u'OpenStack Foundation'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
from ironic.version import version_info
# The full version, including alpha/beta/rc tags.
release = version_info.release_string()
# The short X.Y version.
version = version_info.version_string()
# Config logABug feature
giturl = u'https://git.openstack.org/cgit/openstack/ironic/tree/api-ref/source'
# source tree
# html_context allows us to pass arbitrary values into the html template
html_context = {"bug_tag": "api-ref",
"giturl": giturl,
"bug_project": "ironic"}
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# The reST default role (used for this markup: `text`) to use
# for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for man page output ----------------------------------------------
# Grouping the document tree for man pages.
# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
git_cmd = ["git", "log", "--pretty=format:%ad, commit %h", "--date=local",
"-n1"]
html_last_updated_fmt = subprocess.check_output(git_cmd).decode('utf-8')
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_use_modindex = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'ironicdoc'
# -- Options for LaTeX output -------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'Ironic.tex', u'OpenStack Bare Metal API Documentation',
u'OpenStack Foundation', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_use_modindex = True
|
ThiagoGarciaAlves/intellij-community
|
refs/heads/master
|
python/testData/intentions/PyStringConcatenationToFormatIntentionTest/escapingPy3_after.py
|
83
|
string = "string"
some_string = "some \\ \" escaping {0}".format(string)
|
buntyke/Flask
|
refs/heads/master
|
microblog/flask/lib/python2.7/site-packages/setuptools/ssl_support.py
|
459
|
import os
import socket
import atexit
import re
import pkg_resources
from pkg_resources import ResolutionError, ExtractionError
from setuptools.compat import urllib2
try:
import ssl
except ImportError:
ssl = None
__all__ = [
'VerifyingHTTPSHandler', 'find_ca_bundle', 'is_available', 'cert_paths',
'opener_for'
]
cert_paths = """
/etc/pki/tls/certs/ca-bundle.crt
/etc/ssl/certs/ca-certificates.crt
/usr/share/ssl/certs/ca-bundle.crt
/usr/local/share/certs/ca-root.crt
/etc/ssl/cert.pem
/System/Library/OpenSSL/certs/cert.pem
""".strip().split()
HTTPSHandler = HTTPSConnection = object
for what, where in (
('HTTPSHandler', ['urllib2','urllib.request']),
('HTTPSConnection', ['httplib', 'http.client']),
):
for module in where:
try:
exec("from %s import %s" % (module, what))
except ImportError:
pass
is_available = ssl is not None and object not in (HTTPSHandler, HTTPSConnection)
try:
from ssl import CertificateError, match_hostname
except ImportError:
try:
from backports.ssl_match_hostname import CertificateError
from backports.ssl_match_hostname import match_hostname
except ImportError:
CertificateError = None
match_hostname = None
if not CertificateError:
class CertificateError(ValueError):
pass
if not match_hostname:
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
# Ported from python3-syntax:
# leftmost, *remainder = dn.split(r'.')
parts = dn.split(r'.')
leftmost = parts[0]
remainder = parts[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
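# Illustrative behaviour (comments only, not upstream code):
#   _dnsname_match('*.example.com', 'www.example.com')  # -> match object (truthy)
#   _dnsname_match('*.example.com', 'example.com')      # -> None: the wildcard must cover a label
#   _dnsname_match('www*.example.com', 'www7.example.com')  # -> match: '*' expands to a dotless run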
def match_hostname(cert, hostname):
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate")
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError("hostname %r "
"doesn't match either of %s"
% (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError("hostname %r "
"doesn't match %r"
% (hostname, dnsnames[0]))
else:
raise CertificateError("no appropriate commonName or "
"subjectAltName fields were found")
class VerifyingHTTPSHandler(HTTPSHandler):
"""Simple verifying handler: no auth, subclasses, timeouts, etc."""
def __init__(self, ca_bundle):
self.ca_bundle = ca_bundle
HTTPSHandler.__init__(self)
def https_open(self, req):
return self.do_open(
lambda host, **kw: VerifyingHTTPSConn(host, self.ca_bundle, **kw), req
)
class VerifyingHTTPSConn(HTTPSConnection):
"""Simple verifying connection: no auth, subclasses, timeouts, etc."""
def __init__(self, host, ca_bundle, **kw):
HTTPSConnection.__init__(self, host, **kw)
self.ca_bundle = ca_bundle
def connect(self):
sock = socket.create_connection(
(self.host, self.port), getattr(self, 'source_address', None)
)
# Handle the socket if a (proxy) tunnel is present
if hasattr(self, '_tunnel') and getattr(self, '_tunnel_host', None):
self.sock = sock
self._tunnel()
# http://bugs.python.org/issue7776: Python>=3.4.1 and >=2.7.7
# change self.host to mean the proxy server host when tunneling is
# being used. Adapt, since we are interested in the destination
# host for the match_hostname() comparison.
actual_host = self._tunnel_host
else:
actual_host = self.host
self.sock = ssl.wrap_socket(
sock, cert_reqs=ssl.CERT_REQUIRED, ca_certs=self.ca_bundle
)
try:
match_hostname(self.sock.getpeercert(), actual_host)
except CertificateError:
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
raise
def opener_for(ca_bundle=None):
"""Get a urlopen() replacement that uses ca_bundle for verification"""
return urllib2.build_opener(
VerifyingHTTPSHandler(ca_bundle or find_ca_bundle())
).open
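# Illustrative usage (comments only, not upstream code):
#   open_verified = opener_for()        # CA bundle located via find_ca_bundle()
#   page = open_verified('https://example.org/')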
_wincerts = None
def get_win_certfile():
global _wincerts
if _wincerts is not None:
return _wincerts.name
try:
from wincertstore import CertFile
except ImportError:
return None
class MyCertFile(CertFile):
def __init__(self, stores=(), certs=()):
CertFile.__init__(self)
for store in stores:
self.addstore(store)
self.addcerts(certs)
atexit.register(self.close)
_wincerts = MyCertFile(stores=['CA', 'ROOT'])
return _wincerts.name
def find_ca_bundle():
"""Return an existing CA bundle path, or None"""
if os.name=='nt':
return get_win_certfile()
else:
for cert_path in cert_paths:
if os.path.isfile(cert_path):
return cert_path
try:
return pkg_resources.resource_filename('certifi', 'cacert.pem')
except (ImportError, ResolutionError, ExtractionError):
return None
|
kisoku/ansible
|
refs/heads/devel
|
lib/ansible/new_inventory/host.py
|
236
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
class Host:
def __init__(self, name):
self._name = name
self._connection = None
self._ipv4_address = ''
self._ipv6_address = ''
self._port = 22
self._vars = dict()
def __repr__(self):
return self.get_name()
def get_name(self):
return self._name
def get_groups(self):
return []
def set_variable(self, name, value):
''' sets a variable for this host '''
self._vars[name] = value
def get_vars(self):
''' returns all variables for this host '''
all_vars = self._vars.copy()
all_vars.update(dict(inventory_hostname=self._name))
return all_vars
|
ChristophorusX/Proxy-Hentai-Downloader
|
refs/heads/master
|
xeHentai/i18n/zh_hant.py
|
2
|
# coding:utf-8
from ..const import *
err_msg = {
ERR_URL_NOT_RECOGNIZED: "網址不夠紳士",
ERR_CANT_DOWNLOAD_EXH: "需要登錄後才能下載里站",
ERR_ONLY_VISIBLE_EXH: "這個本子只有里站能看到",
ERR_MALFORMED_HATHDL: "hathdl文件有貓餅,解析失敗",
ERR_GALLERY_REMOVED: "這個本子被移除了,大概里站能看到",
ERR_NO_PAGEURL_FOUND: "沒有找到頁面鏈接,網站改版了嘛?",
ERR_CONNECTION_ERROR: "連接有問題?",
ERR_IP_BANNED: "IP被ban了, 恢復時間: %s",
ERR_TASK_NOT_FOUND: "沒有該GUID對應的任務",
ERR_TASK_LEVEL_UNDEF: "任務過濾等級不存在",
ERR_DELETE_RUNNING_TASK: "無法刪除運行中的任務",
ERR_TASK_CANNOT_PAUSE: "這個任務無法被暫停",
ERR_TASK_CANNOT_RESUME: "這個任務無法被恢復",
ERR_CANNOT_CREATE_DIR: "無法創建文件夾 %s",
ERR_CANNOT_MAKE_ARCHIVE: "無法製作壓縮包 %s",
# ERR_HATHDL_NOTFOUND: "hathdl文件未找到"
ERR_RPC_PARSE_ERROR: "Parse error.",
ERR_RPC_INVALID_REQUEST: "Invalid request.",
ERR_RPC_METHOD_NOT_FOUND: "Method not found.",
ERR_RPC_INVALID_PARAMS: "Invalid method parameter(s).",
ERR_RPC_UNAUTHORIZED: "Unauthorized",
ERR_RPC_EXEC_ERROR: "",
ERR_SAVE_SESSION_FAILED: "",
}
ERR_NOMSG = "未指定的錯誤,錯誤號 %d"
XEH_OPT_DESC = "紳♂士下載器"
XEH_OPT_EPILOG = "如果參數未指定,則使用config.py中的默認值; " \
"討論和反饋問題:https://yooooo.us/2013/xehentai"
XEH_OPT_URLS = "下載頁的網址"
XEH_OPT_u = "用戶名"
XEH_OPT_k = "密碼"
XEH_OPT_c = "Cookie字符串,如果指定了用戶名和密碼,此項會被忽略"
XEH_OPT_o = "是否下載原始圖片(如果存在),需要登錄 (當前: %(default)s)"
XEH_OPT_t = "下載線程數 (當前: %(default)d)"
# XEH_OPT_f = "快速掃描,從hathdl猜測頁面鏈接,但有時會抽風 (當前: %(default)s)"
XEH_OPT_l = "保存日誌的路徑 (當前: %(default)s)"
XEH_OPT_p = "設置代理, 可以指定多次, 當前支持的類型: socks5/4a, http(s), glype (當前: %(default)s)"
XEH_OPT_proxy_image = "同時使用代理來下載圖片,如果設為否則只將代理用於掃描網頁(當前: %(default)s)"
XEH_OPT_d = "設置下載目錄 (當前: %(default)s)"
XEH_OPT_v = "設置日誌裝逼等級 (當前: %(default)s)"
XEH_OPT_i = "交互模式,如果開啟後台模式,此項會被忽略 (當前: %(default)s)"
XEH_OPT_r = "將圖片重命名為原始名稱,如果關閉則使用序號 (當前: %(default)s)"
XEH_OPT_daemon = "後台模式 (當前: %(default)s)"
XEH_OPT_rpc_interface = "設置JSON-RPC監聽IP (當前: %(default)s)"
XEH_OPT_rpc_port = "設置JSON-RPC監聽埠 (當前: %(default)s)"
XEH_OPT_rpc_secret = "設置JSON-RPC密鑰 (當前: %(default)s)"
XEH_OPT_a = "下載完成後生成zip壓縮包並刪除下載目錄 (當前: %(default)s)"
XEH_OPT_h = "顯示本幫助信息"
XEH_OPT_version = "顯示版本信息"
XEH_OPT_IGNORING_I = "後台模式已忽略 -i 參數"
PS_LOGIN = "當前沒有登陸,要登陸嗎 (y/n)? > "
PS_USERNAME = "輸入用戶名 > "
PS_PASSWD = "輸入密碼 > "
PS_URL = "輸入地址(使用,分割下載多個)> "
PS_PROXY = "輸入代理地址 (可選) > "
PS_DOWNLOAD_ORI = "是否下載原圖(默認%s) (y/n)? > "
PS_RENAME_ORI = "是否自動重命名(默認%s) (y/n)? > "
PS_MAKE_ARCHIVE = "是否製作zip壓縮包(默認%s) (y/n)? > "
PS_DOWNLOAD_DIR = "下載目錄 (當前: %s)\n回車確認或輸入新路徑 > "
PROXY_CANDIDATE_CNT = "代理池中有%d個代理"
TASK_PUT_INTO_WAIT = "任務 #%s 已存在, 加入等待隊列"
TASK_ERROR = "任務 #%s 發生錯誤: %s"
TASK_MIGRATE_EXH = "任務 #%s 使用里站地址重新下載"
TASK_TITLE = "任務 #%s 標題 %s"
TASK_WILL_DOWNLOAD_CNT = "任務 #%s 將下載%d個文件,共%d個 "
TASK_START = "任務 #%s 開始"
TASK_FINISHED = "任務 #%s 下載完成"
TASK_START_PAGE_RESCAN = "任務 #%s 圖片被縮放,進行完整掃描"
# TASK_FAST_SCAN = "任務 #%s 使用快速掃描"
TASK_START_MAKE_ARCHIVE = "任務 #%s 開始打包"
TASK_MAKE_ARCHIVE_FINISHED = "任務 #%s 打包完成,保存在: %s, 用時%.1f秒"
TASK_STOP_QUOTA_EXCEEDED = "任務 #%s 配額超限"
TASK_STUCK = "任務 #%s 卡住了, 可能是腳本有bug, 或者網絡連接太慢了"
XEH_STARTED = "xeHentai %s 已啟動"
XEH_LOOP_FINISHED = "程序循環已完成"
XEH_LOGIN_EXHENTAI = "登錄紳士"
XEH_LOGIN_OK = "已成為紳士"
XEH_LOGIN_FAILED = "無法登錄紳士;檢查輸入是否有誤或者換一個帳號。\n推薦在瀏覽器登錄後使用RPC複製cookie到xeHentai (教程: http://t.cn/Rctr4Pf)"
XEH_LOAD_TASKS_CNT = "從存檔中讀取了%d個任務"
XEH_LOAD_OLD_COOKIE = "從1.x版cookie文件從讀取了登錄信息"
XEH_DAEMON_START = "後台進程已啟動,PID為%d"
XEH_PLATFORM_NO_DAEMON = "後台模式不支持您的系統: %s"
XEH_CLEANUP = "擦乾淨..."
XEH_CRITICAL_ERROR = "xeHentai 抽風啦:\n%s"
XEH_DOWNLOAD_ORI_NEED_LOGIN = "下載原圖需要登錄"
XEH_FILE_DOWNLOADED = "圖片已下載 #%d %s"
XEH_RENAME_HAS_ERRORS = "部分圖片重命名失敗:%s"
RPC_STARTED = "RPC伺服器監聽在 %s:%d"
RPC_TOO_OPEN = "RPC伺服器監聽在公網IP (%s),為了安全起見應該設置rpc_secret"
RPC_CANNOT_BIND = "RPC伺服器無法啟動:%s"
SESSION_LOAD_EXCEPTION = "讀取存檔時遇到錯誤: %s"
SESSION_WRITE_EXCEPTION = "寫入存檔時遇到錯誤: %s"
THREAD = "紳士"
THREAD_UNCAUGHT_EXCEPTION = "紳士-%s 未捕獲的異常\n%s"
THREAD_MAY_BECOME_ZOMBIE = "紳士-%s 可能變成了喪屍"
THREAD_SWEEP_OUT = "紳士-%s 掛了, 不再理它"
QUEUE = "隊列"
PROXY_DISABLE_BANNED = "禁用了一個被ban的代理,將在約%s秒後恢復"
|
oascigil/inrpp
|
refs/heads/master
|
src/uan/test/examples-to-run.py
|
195
|
#! /usr/bin/env python
## -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
# A list of C++ examples to run in order to ensure that they remain
# buildable and runnable over time. Each tuple in the list contains
#
# (example_name, do_run, do_valgrind_run).
#
# See test.py for more information.
cpp_examples = [
("uan-rc-example", "True", "True"),
("uan-cw-example", "True", "True"),
]
# A list of Python examples to run in order to ensure that they remain
# runnable over time. Each tuple in the list contains
#
# (example_name, do_run).
#
# See test.py for more information.
python_examples = []
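# Illustrative only (no Python examples exist for this module yet); an entry
# would follow the documented (example_name, do_run) shape, e.g.:
# python_examples = [
#     ("sample-python-example", "True"),   # hypothetical name
# ]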
|
akiokio/dot
|
refs/heads/master
|
src/dot_app/manage.py
|
1
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dot_app.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
ROMFactory/android_external_chromium_org
|
refs/heads/kitkat
|
third_party/closure_linter/closure_linter/common/tokens_test.py
|
126
|
#!/usr/bin/env python
# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest as googletest
from closure_linter.common import tokens
def _CreateDummyToken():
return tokens.Token('foo', None, 1, 1)
def _CreateDummyTokens(count):
dummy_tokens = []
for _ in xrange(count):
dummy_tokens.append(_CreateDummyToken())
return dummy_tokens
def _SetTokensAsNeighbors(neighbor_tokens):
for i in xrange(len(neighbor_tokens)):
prev_index = i - 1
next_index = i + 1
if prev_index >= 0:
neighbor_tokens[i].previous = neighbor_tokens[prev_index]
if next_index < len(neighbor_tokens):
neighbor_tokens[i].next = neighbor_tokens[next_index]
class TokensTest(googletest.TestCase):
def testIsFirstInLine(self):
# First token in file (has no previous).
self.assertTrue(_CreateDummyToken().IsFirstInLine())
a, b = _CreateDummyTokens(2)
_SetTokensAsNeighbors([a, b])
# Tokens on same line
a.line_number = 30
b.line_number = 30
self.assertFalse(b.IsFirstInLine())
# Tokens on different lines
b.line_number = 31
self.assertTrue(b.IsFirstInLine())
def testIsLastInLine(self):
# Last token in file (has no next).
self.assertTrue(_CreateDummyToken().IsLastInLine())
a, b = _CreateDummyTokens(2)
_SetTokensAsNeighbors([a, b])
# Tokens on same line
a.line_number = 30
b.line_number = 30
self.assertFalse(a.IsLastInLine())
b.line_number = 31
self.assertTrue(a.IsLastInLine())
def testIsType(self):
a = tokens.Token('foo', 'fakeType1', 1, 1)
self.assertTrue(a.IsType('fakeType1'))
self.assertFalse(a.IsType('fakeType2'))
def testIsAnyType(self):
a = tokens.Token('foo', 'fakeType1', 1, 1)
self.assertTrue(a.IsAnyType(['fakeType1', 'fakeType2']))
self.assertFalse(a.IsAnyType(['fakeType3', 'fakeType4']))
def testRepr(self):
a = tokens.Token('foo', 'fakeType1', 1, 1)
self.assertEquals('<Token: fakeType1, "foo", None, 1, None>', str(a))
def testIter(self):
dummy_tokens = _CreateDummyTokens(5)
_SetTokensAsNeighbors(dummy_tokens)
a, b, c, d, e = dummy_tokens
i = iter(a)
self.assertListEqual([a, b, c, d, e], list(i))
def testReverseIter(self):
dummy_tokens = _CreateDummyTokens(5)
_SetTokensAsNeighbors(dummy_tokens)
a, b, c, d, e = dummy_tokens
ri = reversed(e)
self.assertListEqual([e, d, c, b, a], list(ri))
if __name__ == '__main__':
googletest.main()
|
supistar/Botnyan
|
refs/heads/master
|
api/slack.py
|
1
|
# -*- encoding:utf8 -*-
import os
import urllib
import re
from flask import Blueprint, request, Response, abort
from flask_negotiate import consumes
from flask.ext.cors import cross_origin
import settings
from model.utils import Utils
from model.loader import PluginLoader
slack = Blueprint('slack', __name__, url_prefix='/api/slack')
@slack.route("/webhook", methods=['POST'])
@cross_origin()
@consumes('application/x-www-form-urlencoded')
def webhook():
# For debug
form = request.form
print form
# Check slack webhook token in request body
request_token = Utils().parse_dic(form, 'token', 400)
token = os.environ.get('SLACK_WEBHOOK_TOKEN')
if not token or token != request_token:
abort(401)
# Parse request body
username = Utils().parse_dic(form, 'user_name')
trigger_word_uni = Utils().parse_dic(form, 'trigger_word')
text_uni = Utils().parse_dic(form, 'text')
# Check trigger user is not bot
if not username or 'bot' in username:
dic = {}
return Response(Utils().dump_json(dic), mimetype='application/json')
botname = settings.BOTNAME
if not botname:
abort(500)
re_flags = settings.RE_FLAGS
plugins = PluginLoader().get_plugins()
content = None
kwargs = {
'text': text_uni,
'trigger_word': trigger_word_uni,
'botname': botname
}
for plugin in plugins:
regex_uni = Utils().convert_unicode(plugin().hear_regex(**kwargs))
print("Using plugin : %r" % plugin)
print("Using regex : %r" % regex_uni)
print("Target text : %r" % urllib.unquote_plus(text_uni))
if re.compile(regex_uni, re_flags).match(urllib.unquote_plus(text_uni)):
print("Regex found :)")
content = plugin().response(**kwargs)
break
if not content:
dic = {}
else:
if isinstance(content, dict) and (content.get('text') or content.get('attachments')):
dic = content
else:
dic = {"text": content}
dic["link_names"] = 1
return Response(Utils().dump_json(dic), mimetype='application/json')
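# Illustrative plugin sketch (comments only, not part of this repo): the loop
# above only requires that each plugin expose hear_regex(**kwargs) and
# response(**kwargs), both receiving 'text', 'trigger_word' and 'botname'.
# A minimal plugin might look like:
#
#   class EchoPlugin(object):                        # hypothetical name
#       def hear_regex(self, **kwargs):
#           return r'.*%s.*' % kwargs['botname']     # react when the bot is mentioned
#       def response(self, **kwargs):
#           return {"text": "nyan!"}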
|
excelly/xpy-ml
|
refs/heads/master
|
sdss/detection/detection_clusters_bagmodel.py
|
1
|
# detection anomalous clusters using the bag of gaussian models.
from ex import *
from ex.plott import *
from ex.ml import PCA
from ex.ml.bag_model import *
from ex.ml.gmm import *
from scipy.special import psi
import base
import detector
import report
from feature import GetFeatures
def usage():
print('''
detection anomalous spatial clusters using bag-of-gaussian models
python --cluster_file={result of dr7_spatial.py} --feature_names={SpectrumS1} [--size_range={10-50}] [--model_penalty=1]
''')
sys.exit(1)
if __name__ == '__main__':
InitLog()
opts=getopt(sys.argv[1:], ['nproc=', 'cluster_file=', 'feature_names=', 'size_range=', 'model_penalty='],
usage)
nproc=int(opts.get('--nproc', 1))
cluster_file = opts['--cluster_file']
feature_names = opts.get('--feature_names', 'SpectrumS1')
size_range=opts.get('--size_range', "20-50")
size_range = [int(s) for s in size_range.split('-')]
model_penalty = float(opts.get('--model_penalty', 1))
output_dir='./cluster_bagmodel/'
MakeDir(output_dir)
tag='{0}[{1}][{2}-{3}][{4}]'.format(
cluster_file.replace('.pkl', '').replace('spatial_clusters_', ''),
feature_names, size_range[0], size_range[1], model_penalty)
log.info('Run name: {0}'.format(tag))
# load the data
cluster_data=LoadPickles(cluster_file)
#{'clusters', 'spec_ids'}
feature, info = GetFeatures(feature_names, nproc = 1)
#info: ['bestObjID', 'rdz', 'spec_cln', 'specObjID', 'ID']
spec_ids=info['specObjID']
spec_cln=info['spec_cln']
pos = hstack((info['pos'], col(info['z'])))
# make sure the data are compatible
check(spec_ids == cluster_data['spec_ids'], 'incompatible data')
# pca
log.info('Training PCA...')
pca = PCA.Train(feature, 0.99)
# figure()
# pca.Visualize(feature[int64(linspace(0, len(feature)-1, 10000))])
# pause()
feature = pca.Project(feature, min(pca.R, 50))
clusters = cluster_data['clusters']
# filter clusters
clusters=[c for c in clusters if len(c) >= size_range[0] and len(c) <= size_range[1]]
log.info('{0} clusters found between size range {1}'.format(len(clusters), size_range))
# make bag-of-gaussians data
M = len(clusters)
N = [len(c) for c in clusters]
cN = cat(([0], cumsum(N)))
npoint = cN[-1]
X = zeros((npoint, feature.shape[1]))
group_id = zeros(npoint, int32)
for m in range(M):
X[cN[m]:cN[m+1]] = feature[clusters[m]]
group_id[cN[m]:cN[m+1]] = m
####### detection
Ts = arange(3, 8)[::-1]
Ks = arange(3, 10)[::-1]
options = {'ntry':10, 'init':'kmeans', 'bic_coeff':model_penalty,
'epsilon':1e-3, 'maxIter':50, 'verbose':False, 'symmetric':True,
'nproc':1, 'nproc_bic':nproc}
# MGMM
R_mgmm, L_mgmm, stat_mgmm = FitMGMM_BICSearch(X, group_id, Ts, Ks, options)
print stat_mgmm
T = int(stat_mgmm[argmax(stat_mgmm[:,-1]), 0])
K = int(stat_mgmm[argmax(stat_mgmm[:,-1]), 1])
log.info('T=%d, K=%d selected for MGMM.' % (T, K))
pi, chi, mu, sigma, gama, phi = R_mgmm
lnpdf = GaussianPDF(X, mu, sigma)
l_mgmm = MGMMGroupLikelihood(group_id, lnpdf, R_mgmm)
l_mgmm_true = MGMMGroupLikelihoodTruth(group_id, lnpdf, R_mgmm)
log.info('Var likelihood = %g; True likelihood = %g' % (l_mgmm.sum(), l_mgmm_true.sum()))
scores_mgmm = -l_mgmm_true
rank_mgmm = argsort(scores_mgmm)[::-1]
options['nproc'] = nproc
options['nproc_bic'] = 1
# GLDA
R_glda, L_glda = FitGLDA(X, group_id, K, options)
alpha, mu, sigma, gama, phi = R_glda
lnpdf = GaussianPDF(X, mu, sigma)
pg = psi(gama) - col(psi(gama.sum(1)))
l_glda = GLDAGroupLikelihood(group_id, lnpdf, pg, R_glda)
scores_glda = -l_glda
rank_glda = argsort(scores_glda)[::-1]
# GMM
R_gmm, L_gmm = FitGMM(X, K, options)
pi, mu, sigma = R_gmm
lnpdf = GaussianPDF(X, mu, sigma)
l_gmm = GMMLikelihood(lnpdf.T, R_gmm)
l_gmm = accumarray(group_id, l_gmm, zeros(M))
scores_gmm = -l_gmm
rank_gmm = argsort(scores_gmm)[::-1]
cluster_info = ['MGMM: {0} <br> GLDA: {1} <br> GMM: {2}'.format(rank_mgmm[i], rank_glda[i], rank_gmm[i]) for i in range(len(clusters))]
html_an, html_all=report.GenReportCluster(clusters, scores_glda, spec_ids, pos, cluster_info, None, 20)
SaveText('{0}/report_glda_{1}_anomaly.html'.format(output_dir, tag), html_an)
SaveText('{0}/report_glda_{1}_all.html'.format(output_dir, tag), html_all)
html_an, html_all=report.GenReportCluster(clusters, scores_gmm, spec_ids, pos, cluster_info, None, 20)
SaveText('{0}/report_gmm_{1}_anomaly.html'.format(output_dir, tag), html_an)
SaveText('{0}/report_gmm_{1}_all.html'.format(output_dir, tag), html_all)
html_an, html_all=report.GenReportCluster(clusters, scores_mgmm, spec_ids, pos, cluster_info, None, 20)
SaveText('{0}/report_mgmm_{1}_anomaly.html'.format(output_dir, tag), html_an)
SaveText('{0}/report_mgmm_{1}_all.html'.format(output_dir, tag), html_all)
scores_mgmm_v_glda = fabs(rank_mgmm - rank_glda)
html_an, html_all=report.GenReportCluster(clusters, scores_mgmm_v_glda, spec_ids, pos, cluster_info, None, 20)
SaveText('{0}/report_mgmm_v_glda_{1}_anomaly.html'.format(output_dir, tag), html_an)
SaveText('{0}/report_mgmm_v_glda_{1}_all.html'.format(output_dir, tag), html_all)
scores_mgmm_v_gmm = fabs(rank_mgmm - rank_gmm)
html_an, html_all=report.GenReportCluster(clusters, scores_mgmm_v_gmm, spec_ids, pos, cluster_info, None, 20)
SaveText('{0}/report_mgmm_v_gmm_{1}_anomaly.html'.format(output_dir, tag), html_an)
SaveText('{0}/report_mgmm_v_gmm_{1}_all.html'.format(output_dir, tag), html_all)
workspace = (l_gmm, l_glda, L_gmm, clusters, R_gmm, logsafe, l_mgmm_true, X, L_mgmm, group_id, npoint, cluster_file, L_glda, stat_mgmm, cluster_data, cN, scores_glda, R_mgmm, options, l_mgmm, pos, scores_gmm, model_penalty, N, Ks, info, Ts, tag, feature_names, R_glda, M, scores_mgmm, nproc, size_range, spec_ids, rank_mgmm, rank_glda, rank_gmm, T, K)
SavePickle('{0}/workspace_{1}.pkl'.format(output_dir, tag), workspace)
|
lemonsong/lemonsong.github.io
|
refs/heads/master
|
blog/pelican-plugins/goodreads_activity/__init__.py
|
76
|
from .goodreads_activity import *
|
QubitPi/HadooPyTester
|
refs/heads/master
|
examples/topN/reducer.py
|
6
|
#!/usr/bin/env python
import sys
from reduce_function import reduce_function
from emit import emit
reduce_function = reduce_function()
emit = emit()
for line in sys.stdin: # input comes from STDIN
line = line.strip() # remove leading and trailing whitespace
key, value = line.split('\t', 1) # parse the input we got from mapper.py
try:
key = int(key) # convert key(currently string) to int
except ValueError:
# not a number, stop program execution #
print "Key \"%s\" is not numeric" % number
print "Reducer stops"
sys.exit(1);
try:
value = int(value) # convert value(currently string) to int
except ValueError:
# not a number, stop program execution #
print "Value \"%s\" is not numeric" % number
print "Reducer stops"
sys.exit(1);
emit( reduce_function(key, value) );
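# Illustrative local smoke test (comments only, not part of the original
# script): the reducer consumes tab-separated "key<TAB>value" lines on stdin,
# so it can be exercised outside Hadoop streaming with something like
#   printf '3\t42\n1\t7\n' | python reducer.py
# (the exact emit/reduce_function behaviour is repo-specific).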
|
sparkslabs/kamaelia_
|
refs/heads/master
|
Tests/Python/Axon/test_AdaptiveCommsComponent.py
|
3
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
#
# Full coverage testing of the Adaptive Comms Component
#
# Test the module loads
import unittest
import Axon.AdaptiveCommsComponent as AdaptiveCommsComponent
import Axon.Component as Component
class AdaptiveCommsComponent_Test(unittest.TestCase):
def test_SmokeTest_NoArguments(self):
"__init__ - Called with no arguments is expected, results in component superconstructor being called. performs no local initialisation"
a=AdaptiveCommsComponent.AdaptiveCommsComponent()
self.assert_( isinstance(a,AdaptiveCommsComponent.AdaptiveCommsComponent), "Right Type")
self.assert_( isinstance(a,Component.component), "Right Base Type")
self.assert_( 0==len(a.inboxes['control']) and 0==len(a.inboxes['inbox']) and 2==len(a.inboxes), "Correct Basic inboxes")
self.assert_( 0==len(a.outboxes['signal']) and 0==len(a.outboxes['outbox']) and 2==len(a.outboxes), "Correct Basic outboxes")
def test_SmokeTest_Arguments(self):
"__init__ - Called with with arguments does not cause problems"
AdaptiveCommsComponent.AdaptiveCommsComponent("hello","world")
def test_addInbox_newName(self):
"addInbox - adding an inbox with a completely new name results in that inbox being created/added"
a=AdaptiveCommsComponent.AdaptiveCommsComponent()
name=a.addInbox("name")
self.assertEqual("name",name,"Inbox was added with the name we requested")
inboxNames = list(a.inboxes)
self.assert_( name in inboxNames, "Inbox was added")
self.assert_( len(a.inboxes[name])==0, "Inbox created is empty")
def test_addInbox_existingName(self):
"addInbox - adding an inbox with an existing name results in an inbox being created/added with a similar name - same name with a suffix"
import re
a=AdaptiveCommsComponent.AdaptiveCommsComponent()
name=a.addInbox("name")
name=a.addInbox("name") # Attempt to create second box with existing name
self.assertNotEqual("name",name,"Inbox was added with the name we requested")
self.assert_(re.match("name",name), "Inbox created has a simlar name (same start)")
def test_addOutbox_newName(self):
"addOutbox - adding an outbox with a completely new name results in that outbox being created/added"
a=AdaptiveCommsComponent.AdaptiveCommsComponent()
name=a.addOutbox("name")
self.assertEqual("name",name,"Inbox was added with the name we requested")
outboxNames = list(a.outboxes)
self.assert_( name in outboxNames, "Outbox was added")
self.assert_( len(a.outboxes[name])==0, "Outbox created was empty")
def test_addOutbox_existingName(self):
"addOutbox - adding an outbox with an existing name results in an outbox being created/added with a similar name - same name with a suffix"
import re
a=AdaptiveCommsComponent.AdaptiveCommsComponent()
name=a.addOutbox("name")
name=a.addOutbox("name") # Attempt to create second box with existing name
self.assertNotEqual("name",name,"Inbox was added with the name we requested")
self.assert_(re.match("name",name), "Inbox created has a simlar name (same start)")
def test_InboxModification(self):
"-Acceptance Test - Check Addition and Deletion of Inboxes"
a=AdaptiveCommsComponent.AdaptiveCommsComponent()
addedNames = []
for i in [1,2,3,4]:
inboxNames = list(a.inboxes)
name=a.addInbox("name")
self.assert_( name not in inboxNames, "Inbox added had a new name")
inboxNames = list(a.inboxes)
self.assert_( name in inboxNames, "Inbox was added")
addedNames.append(name)
self.assert_( len(a.inboxes[name])==0, "Inbox created was empty")
#
for name in addedNames:
a.deleteInbox(name)
self.assert_( name not in a.inboxes, "Inbox was deleted")
#
self.assert_( 0==len(a.inboxes['control']) and 0==len(a.inboxes['inbox']) and 2==len(a.inboxes), "Only have default inboxes left")
#
def test_OutboxModification(self):
"-Acceptance Test - Check Addition and Deletion of Outboxes"
a=AdaptiveCommsComponent.AdaptiveCommsComponent()
addedNames = []
for i in [1,2,3,4]:
outboxNames = list(a.outboxes)
name=a.addOutbox("name")
self.assert_( name not in outboxNames, "Outbox added had a new name")
outboxNames = list(a.outboxes)
self.assert_( name in outboxNames, "Outbox was added")
addedNames.append(name)
self.assert_( len(a.outboxes[name])==0, "Outbox created was empty")
#
for name in addedNames:
a.deleteOutbox(name)
self.assert_( name not in a.outboxes, "Outbox was deleted")
#
      self.assert_( 0==len(a.outboxes['signal']) and 0==len(a.outboxes['outbox']) and 2==len(a.outboxes), "Only have default outboxes left")
def test_deleteInbox_invalidInbox(self):
"deleteInbox - KeyError exception raised if you try to delete an inbox that does not exist - this includes the case of an already deleted Inbox"
a=AdaptiveCommsComponent.AdaptiveCommsComponent()
self.assertRaises(KeyError, a.deleteInbox,"NonExistantInbox")
def test_deleteInbox_validInbox(self):
"deleteInbox - Deletes the named inbox"
import random
a=AdaptiveCommsComponent.AdaptiveCommsComponent()
# Pick a random inbox to delete
inboxNames=list(a.inboxes)
inboxNames.sort()
box=inboxNames[random.randint(0,len(inboxNames)-1)]
a.deleteInbox(box)
newinboxNames=list(a.inboxes)
newinboxNames.sort()
self.assertNotEqual(inboxNames,newinboxNames,"Inboxes were changed")
      self.assert_(box not in newinboxNames, "Inbox "+box+" was deleted")
def test_deleteOutbox_invalidOutbox(self):
"deleteOutbox - KeyError exception raised if you try to delete an outbox that does not exist - this includes the case of an already deleted Outbox"
a=AdaptiveCommsComponent.AdaptiveCommsComponent()
      self.assertRaises(KeyError, a.deleteOutbox,"NonExistantOutbox")
def test_deleteOutbox_validOutbox(self):
"deleteOutbox - Deletes the named outbox"
import random
a=AdaptiveCommsComponent.AdaptiveCommsComponent()
      # Pick a random outbox to delete
outboxNames=list(a.outboxes)
outboxNames.sort()
box=outboxNames[random.randint(0,len(outboxNames)-1)]
a.deleteOutbox(box)
newoutboxNames=list(a.outboxes)
newoutboxNames.sort()
self.assertNotEqual(outboxNames,newoutboxNames,"Outboxes were changed")
      self.assert_(box not in newoutboxNames, "Outbox "+box+" was deleted")
def test_trackResource_validDefaultInbox(self):
"trackResource,retrieveTrackedResource - Adds to & retrieves from the mapping of inbox -> resource to a local store. This allows retrieval of the resource based on which inbox messages arrive on. Whilst designed for custom inboxes, it should work with the 'default' inboxes for a component"
a=AdaptiveCommsComponent.AdaptiveCommsComponent()
resource = "resource"
inboxResourceAssociatedWith = "inbox"
a.trackResource(resource, inboxResourceAssociatedWith)
storedResource=a.retrieveTrackedResource(inboxResourceAssociatedWith)
self.assertEqual(resource, storedResource, "The resource was correctly stored")
def test_trackResource_validDynamicInbox(self):
"trackResource,retrieveTrackedResource - Tracking resources using a custom dynamic inbox name should work."
a=AdaptiveCommsComponent.AdaptiveCommsComponent()
resource = "resource"
inboxResourceAssociatedWith = a.addInbox("dynamic")
a.trackResource(resource, inboxResourceAssociatedWith)
storedResource=a.retrieveTrackedResource(inboxResourceAssociatedWith)
self.assertEqual(resource, storedResource, "The resource was correctly stored")
def test_trackResource_invalidInbox(self):
"trackResource,retrieveTrackedResource - Tracking resources using an invalid inbox name should fail."
import time
a=AdaptiveCommsComponent.AdaptiveCommsComponent()
resource = "resource"
inboxResourceAssociatedWith = a.addInbox("name")+str(time.time()) # Ensure non-existant
self.assertRaises(KeyError, a.trackResource, resource, inboxResourceAssociatedWith)
def test_trackResourceInformation_validDefaultBoxes(self):
"trackResourceInformation, retrieveTrackedResourceInformation - Associates communication & user aspects related to a resource. Associating default in/out boxes with a resource is valid"
a=AdaptiveCommsComponent.AdaptiveCommsComponent()
inboxes=list(a.inboxes)
outboxes=list(a.outboxes)
arbitraryInformation="qwertyuiopasdfghjklzxcvbnm"
resource = "resource"
a.trackResourceInformation(resource,inboxes,outboxes,arbitraryInformation)
storedInboxes, storedOutboxes, storedInformation = a.retrieveTrackedResourceInformation(resource)
self.assertEqual(inboxes, storedInboxes, "We can retrieve the right set of inboxes")
self.assertEqual(outboxes, storedOutboxes, "We can retrieve the right set of outboxes")
self.assertEqual(arbitraryInformation, storedInformation, "We can retrieve the right arbitrary information")
def test_trackResourceInformation_validDynamicBoxes(self):
"trackResourceInformation, retrieveTrackedResourceInformation - Associates communication & user aspects related to a resource. Associating dynamically created in/out boxes with a resource is the default"
a=AdaptiveCommsComponent.AdaptiveCommsComponent()
inboxes = [ a.addInbox("dynamic"),a.addInbox("dynamic"),a.addInbox("dynamic") ]
outboxes = [ a.addOutbox("dynamic"),a.addOutbox("dynamic"),a.addOutbox("dynamic") ]
arbitraryInformation="qwertyuiopasdfghjklzxcvbnm"
resource = "resource"
a.trackResourceInformation(resource,inboxes,outboxes,arbitraryInformation)
storedInboxes, storedOutboxes, storedInformation = a.retrieveTrackedResourceInformation(resource)
self.assertEqual(inboxes, storedInboxes, "We can retrieve the right set of inboxes")
self.assertEqual(outboxes, storedOutboxes, "We can retrieve the right set of outboxes")
self.assertEqual(arbitraryInformation, storedInformation, "We can retrieve the right arbitrary information")
def test_trackResourceInformation_invalidInboxes(self):
"trackResourceInformation, retrieveTrackedResourceInformation - Tracking invalid inboxes using a resource fails."
import time
a=AdaptiveCommsComponent.AdaptiveCommsComponent()
inboxes = [ a.addInbox("name")+str(time.time()) ] # This list of inboxes is "guaranteed" not to exist
outboxes = [ a.addOutbox("dynamic") ] # List of "guaranteed" valid outboxes
arbitraryInformation="qwertyuiopasdfghjklzxcvbnm"
resource = "resource"
self.assertRaises(KeyError, a.trackResourceInformation, resource,inboxes,outboxes,arbitraryInformation)
def test_trackResourceInformation_invalidOutboxes(self):
"trackResourceInformation, retrieveTrackedResourceInformation - Tracking invalid outboxes using a resource fails."
import time
a=AdaptiveCommsComponent.AdaptiveCommsComponent()
inboxes = [ a.addInbox("dynamic") ] # List of "guaranteed" valid outboxes
outboxes = [ a.addOutbox("name")+str(time.time()) ] # This list of inboxes is "guaranteed" not to exist
arbitraryInformation="qwertyuiopasdfghjklzxcvbnm"
resource = "resource"
self.assertRaises(KeyError, a.trackResourceInformation, resource,inboxes,outboxes,arbitraryInformation)
def _test_Notes(self):
"-XXXXX mps 29/5/3- Check that subclasses operate correctly (Needs writing)"
pass
# print "Not written"
if __name__=="__main__":
unittest.main()
|
rhelmer/socorro
|
refs/heads/master
|
webapp-django/crashstats/signature/__init__.py
|
12133432
| |
famulus/aubio
|
refs/heads/master
|
python/demos/demo_specdesc.py
|
9
|
#! /usr/bin/env python
import sys
from aubio import fvec, source, pvoc, specdesc
from numpy import hstack
win_s = 512 # fft size
hop_s = win_s / 4 # hop size
if len(sys.argv) < 2:
print "Usage: %s <filename> [samplerate]" % sys.argv[0]
sys.exit(1)
filename = sys.argv[1]
samplerate = 0
if len( sys.argv ) > 2: samplerate = int(sys.argv[2])
s = source(filename, samplerate, hop_s)
samplerate = s.samplerate
pv = pvoc(win_s, hop_s)
methods = ['default', 'energy', 'hfc', 'complex', 'phase', 'specdiff', 'kl',
'mkl', 'specflux', 'centroid', 'slope', 'rolloff', 'spread', 'skewness',
'kurtosis', 'decrease',]
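# one specdesc object is created per method below; each phase-vocoder frame
# (fftgrain) is fed to every descriptor in the main loop and the per-frame
# values are accumulated in all_descs[method] for the plots at the end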
all_descs = {}
o = {}
for method in methods:
cands = []
all_descs[method] = fvec(0)
o[method] = specdesc(method, win_s)
total_frames = 0
downsample = 2
while True:
samples, read = s()
fftgrain = pv(samples)
#print "%f" % ( total_frames / float(samplerate) ),
for method in methods:
specdesc_val = o[method](fftgrain)[0]
all_descs[method] = hstack ( [all_descs[method], specdesc_val] )
#print "%f" % specdesc_val,
#print
total_frames += read
if read < hop_s: break
if 1:
print "done computing, now plotting"
import matplotlib.pyplot as plt
from demo_waveform_plot import get_waveform_plot
from demo_waveform_plot import set_xlabels_sample2time
fig = plt.figure()
plt.rc('lines',linewidth='.8')
wave = plt.axes([0.1, 0.75, 0.8, 0.19])
get_waveform_plot(filename, samplerate, block_size = hop_s, ax = wave )
wave.yaxis.set_visible(False)
wave.xaxis.set_visible(False)
all_desc_times = [ x * hop_s for x in range(len(all_descs["default"])) ]
n_methods = len(methods)
for i, method in enumerate(methods):
#ax = fig.add_subplot (n_methods, 1, i)
#plt2 = plt.axes([0.1, 0.1, 0.8, 0.65], sharex = plt1)
ax = plt.axes ( [0.1, 0.75 - ((i+1) * 0.65 / n_methods), 0.8, 0.65 / n_methods], sharex = wave )
ax.plot(all_desc_times, all_descs[method], '-', label = method)
#ax.set_ylabel(method, rotation = 0)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
ax.axis(xmax = all_desc_times[-1], xmin = all_desc_times[0])
ax.annotate(method, xy=(-10, 0), xycoords='axes points',
horizontalalignment='right', verticalalignment='bottom',
)
set_xlabels_sample2time(ax, all_desc_times[-1], samplerate)
#plt.ylabel('spectral descriptor value')
ax.xaxis.set_visible(True)
plt.show()
|
insomnia-lab/calibre
|
refs/heads/master
|
src/calibre/ebooks/readability/readability.py
|
10
|
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
import re, sys
from collections import defaultdict
from lxml.etree import tostring
from lxml.html import (fragment_fromstring, document_fromstring,
tostring as htostring)
from calibre.ebooks.readability.htmls import build_doc, get_body, get_title, shorten_title
from calibre.ebooks.readability.cleaners import html_cleaner, clean_attributes
def tounicode(tree_or_node, **kwargs):
kwargs['encoding'] = unicode
return htostring(tree_or_node, **kwargs)
REGEXES = {
'unlikelyCandidatesRe': re.compile('combx|comment|community|disqus|extra|foot|header|menu|remark|rss|shoutbox|sidebar|sponsor|ad-break|agegate|pagination|pager|popup|tweet|twitter',re.I),
'okMaybeItsACandidateRe': re.compile('and|article|body|column|main|shadow',re.I),
'positiveRe': re.compile('article|body|content|entry|hentry|main|page|pagination|post|text|blog|story',re.I),
'negativeRe': re.compile('combx|comment|com-|contact|foot|footer|footnote|masthead|media|meta|outbrain|promo|related|scroll|shoutbox|sidebar|sponsor|shopping|tags|tool|widget',re.I),
'divToPElementsRe': re.compile('<(a|blockquote|dl|div|img|ol|p|pre|table|ul)',re.I),
#'replaceBrsRe': re.compile('(<br[^>]*>[ \n\r\t]*){2,}',re.I),
#'replaceFontsRe': re.compile('<(\/?)font[^>]*>',re.I),
#'trimRe': re.compile('^\s+|\s+$/'),
#'normalizeRe': re.compile('\s{2,}/'),
#'killBreaksRe': re.compile('(<br\s*\/?>(\s| ?)*){1,}/'),
#'videoRe': re.compile('http:\/\/(www\.)?(youtube|vimeo)\.com', re.I),
#skipFootnoteLink: /^\s*(\[?[a-z0-9]{1,2}\]?|^|edit|citation needed)\s*$/i,
}
def describe(node, depth=1):
if not hasattr(node, 'tag'):
return "[%s]" % type(node)
name = node.tag
if node.get('id', ''): name += '#'+node.get('id')
if node.get('class', ''):
name += '.' + node.get('class').replace(' ','.')
if name[:4] in ['div#', 'div.']:
name = name[3:]
if depth and node.getparent() is not None:
return name+' - '+describe(node.getparent(), depth-1)
return name
def to_int(x):
if not x: return None
x = x.strip()
if x.endswith('px'):
return int(x[:-2])
if x.endswith('em'):
return int(x[:-2]) * 12
return int(x)
def clean(text):
text = re.sub('\s*\n\s*', '\n', text)
text = re.sub('[ \t]{2,}', ' ', text)
return text.strip()
def text_length(i):
return len(clean(i.text_content() or ""))
class Unparseable(ValueError):
pass
class Document:
TEXT_LENGTH_THRESHOLD = 25
RETRY_LENGTH = 250
def __init__(self, input, log, **options):
self.input = input
self.options = defaultdict(lambda: None)
for k, v in options.items():
self.options[k] = v
self.html = None
self.log = log
self.keep_elements = set()
def _html(self, force=False):
if force or self.html is None:
self.html = self._parse(self.input)
path = self.options['keep_elements']
if path is not None:
self.keep_elements = set(self.html.xpath(path))
return self.html
def _parse(self, input):
doc = build_doc(input)
doc = html_cleaner.clean_html(doc)
base_href = self.options['url']
if base_href:
doc.make_links_absolute(base_href, resolve_base_href=True)
else:
doc.resolve_base_href()
return doc
def content(self):
return get_body(self._html(True))
def title(self):
return get_title(self._html(True))
def short_title(self):
return shorten_title(self._html(True))
def summary(self):
try:
ruthless = True
while True:
self._html(True)
for i in self.tags(self.html, 'script', 'style'):
i.drop_tree()
for i in self.tags(self.html, 'body'):
i.set('id', 'readabilityBody')
if ruthless:
self.remove_unlikely_candidates()
self.transform_misused_divs_into_paragraphs()
candidates = self.score_paragraphs()
best_candidate = self.select_best_candidate(candidates)
if best_candidate:
article = self.get_article(candidates, best_candidate)
else:
if ruthless:
self.log.debug("ruthless removal did not work. ")
ruthless = False
self.debug("ended up stripping too much - going for a safer _parse")
# try again
continue
else:
self.log.debug("Ruthless and lenient parsing did not work. Returning raw html")
article = self.html.find('body')
if article is None:
article = self.html
cleaned_article = self.sanitize(article, candidates)
of_acceptable_length = len(cleaned_article or '') >= (self.options['retry_length'] or self.RETRY_LENGTH)
if ruthless and not of_acceptable_length:
ruthless = False
continue # try again
else:
return cleaned_article
except StandardError, e:
self.log.exception('error getting summary: ' )
raise Unparseable(str(e)), None, sys.exc_info()[2]
def get_article(self, candidates, best_candidate):
# Now that we have the top candidate, look through its siblings for content that might also be related.
# Things like preambles, content split by ads that we removed, etc.
sibling_score_threshold = max([10, best_candidate['content_score'] * 0.2])
output = document_fromstring('<div/>')
parent = output.xpath('//div')[0]
best_elem = best_candidate['elem']
for sibling in best_elem.getparent().getchildren():
#if isinstance(sibling, NavigableString): continue#in lxml there no concept of simple text
append = False
if sibling is best_elem:
append = True
if sibling in candidates and candidates[sibling]['content_score'] >= sibling_score_threshold:
append = True
if sibling in self.keep_elements:
append = True
if sibling.tag == "p":
link_density = self.get_link_density(sibling)
node_content = sibling.text or ""
node_length = len(node_content)
if node_length > 80 and link_density < 0.25:
append = True
elif node_length < 80 and link_density == 0 and re.search('\.( |$)', node_content):
append = True
if append:
parent.append(sibling)
#if output is not None:
# output.append(best_elem)
return output.find('body')
def select_best_candidate(self, candidates):
sorted_candidates = sorted(candidates.values(), key=lambda x: x['content_score'], reverse=True)
for candidate in sorted_candidates[:5]:
elem = candidate['elem']
self.debug("Top 5 : %6.3f %s" % (candidate['content_score'], describe(elem)))
if len(sorted_candidates) == 0:
return None
best_candidate = sorted_candidates[0]
return best_candidate
def get_link_density(self, elem):
link_length = 0
for i in elem.findall(".//a"):
link_length += text_length(i)
#if len(elem.findall(".//div") or elem.findall(".//p")):
# link_length = link_length
total_length = text_length(elem)
return float(link_length) / max(total_length, 1)
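    # i.e. the fraction of an element's text that lives inside <a> tags;
    # score_paragraphs() multiplies each candidate score by (1 - link density),
    # so link-heavy blocks such as navigation lists are penalised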
def score_paragraphs(self, ):
MIN_LEN = self.options.get('min_text_length', self.TEXT_LENGTH_THRESHOLD)
candidates = {}
#self.debug(str([describe(node) for node in self.tags(self.html, "div")]))
ordered = []
for elem in self.tags(self.html, "p", "pre", "td"):
parent_node = elem.getparent()
if parent_node is None:
continue
grand_parent_node = parent_node.getparent()
inner_text = clean(elem.text_content() or "")
inner_text_len = len(inner_text)
# If this paragraph is less than 25 characters, don't even count it.
if inner_text_len < MIN_LEN:
continue
if parent_node not in candidates:
candidates[parent_node] = self.score_node(parent_node)
ordered.append(parent_node)
if grand_parent_node is not None and grand_parent_node not in candidates:
candidates[grand_parent_node] = self.score_node(grand_parent_node)
ordered.append(grand_parent_node)
content_score = 1
content_score += len(inner_text.split(','))
content_score += min((inner_text_len / 100), 3)
#if elem not in candidates:
# candidates[elem] = self.score_node(elem)
#WTF? candidates[elem]['content_score'] += content_score
candidates[parent_node]['content_score'] += content_score
if grand_parent_node is not None:
candidates[grand_parent_node]['content_score'] += content_score / 2.0
# Scale the final candidates score based on link density. Good content should have a
# relatively small link density (5% or less) and be mostly unaffected by this operation.
for elem in ordered:
candidate = candidates[elem]
ld = self.get_link_density(elem)
score = candidate['content_score']
self.debug("Candid: %6.3f %s link density %.3f -> %6.3f" % (score, describe(elem), ld, score*(1-ld)))
candidate['content_score'] *= (1 - ld)
return candidates
def class_weight(self, e):
weight = 0
if e.get('class', None):
if REGEXES['negativeRe'].search(e.get('class')):
weight -= 25
if REGEXES['positiveRe'].search(e.get('class')):
weight += 25
if e.get('id', None):
if REGEXES['negativeRe'].search(e.get('id')):
weight -= 25
if REGEXES['positiveRe'].search(e.get('id')):
weight += 25
return weight
def score_node(self, elem):
content_score = self.class_weight(elem)
name = elem.tag.lower()
if name == "div":
content_score += 5
elif name in ["pre", "td", "blockquote"]:
content_score += 3
elif name in ["address", "ol", "ul", "dl", "dd", "dt", "li", "form"]:
content_score -= 3
elif name in ["h1", "h2", "h3", "h4", "h5", "h6", "th"]:
content_score -= 5
return {
'content_score': content_score,
'elem': elem
}
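    # illustrative example: a <div class="article post"> starts at +5 (div) +25 (positive class) = +30,
    # while a <ul class="sidebar"> starts at -3 (ul) -25 (negative class) = -28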
def debug(self, *a):
#if self.options['debug']:
self.log.debug(*a)
def remove_unlikely_candidates(self):
for elem in self.html.iter():
if elem in self.keep_elements:
continue
s = "%s %s" % (elem.get('class', ''), elem.get('id', ''))
#self.debug(s)
if REGEXES['unlikelyCandidatesRe'].search(s) and (not REGEXES['okMaybeItsACandidateRe'].search(s)) and elem.tag != 'body':
self.debug("Removing unlikely candidate - %s" % describe(elem))
elem.drop_tree()
def transform_misused_divs_into_paragraphs(self):
for elem in self.tags(self.html, 'div'):
# transform <div>s that do not contain other block elements into <p>s
if not REGEXES['divToPElementsRe'].search(unicode(''.join(map(tostring, list(elem))))):
#self.debug("Altering %s to p" % (describe(elem)))
elem.tag = "p"
#print "Fixed element "+describe(elem)
for elem in self.tags(self.html, 'div'):
if elem.text and elem.text.strip():
p = fragment_fromstring('<p/>')
p.text = elem.text
elem.text = None
elem.insert(0, p)
#print "Appended "+tounicode(p)+" to "+describe(elem)
for pos, child in reversed(list(enumerate(elem))):
if child.tail and child.tail.strip():
p = fragment_fromstring('<p/>')
p.text = child.tail
child.tail = None
elem.insert(pos + 1, p)
#print "Inserted "+tounicode(p)+" to "+describe(elem)
if child.tag == 'br':
#print 'Dropped <br> at '+describe(elem)
child.drop_tree()
def tags(self, node, *tag_names):
for tag_name in tag_names:
for e in node.findall('.//%s' % tag_name):
yield e
def reverse_tags(self, node, *tag_names):
for tag_name in tag_names:
for e in reversed(node.findall('.//%s' % tag_name)):
yield e
def sanitize(self, node, candidates):
MIN_LEN = self.options.get('min_text_length', self.TEXT_LENGTH_THRESHOLD)
for header in self.tags(node, "h1", "h2", "h3", "h4", "h5", "h6"):
if self.class_weight(header) < 0 or self.get_link_density(header) > 0.33:
header.drop_tree()
for elem in self.tags(node, "form", "iframe", "textarea"):
elem.drop_tree()
allowed = {}
# Conditionally clean <table>s, <ul>s, and <div>s
for el in self.reverse_tags(node, "table", "ul", "div"):
if el in allowed or el in self.keep_elements:
continue
weight = self.class_weight(el)
if el in candidates:
content_score = candidates[el]['content_score']
#print '!',el, '-> %6.3f' % content_score
else:
content_score = 0
tag = el.tag
if weight + content_score < 0:
self.debug("Cleaned %s with score %6.3f and weight %-3s" %
(describe(el), content_score, weight, ))
el.drop_tree()
elif el.text_content().count(",") < 10:
counts = {}
for kind in ['p', 'img', 'li', 'a', 'embed', 'input']:
counts[kind] = len(el.findall('.//%s' %kind))
counts["li"] -= 100
content_length = text_length(el) # Count the text length excluding any surrounding whitespace
link_density = self.get_link_density(el)
parent_node = el.getparent()
if parent_node is not None:
if parent_node in candidates:
content_score = candidates[parent_node]['content_score']
else:
content_score = 0
#if parent_node is not None:
#pweight = self.class_weight(parent_node) + content_score
#pname = describe(parent_node)
#else:
#pweight = 0
#pname = "no parent"
to_remove = False
reason = ""
#if el.tag == 'div' and counts["img"] >= 1:
# continue
if counts["p"] and counts["img"] > counts["p"]:
reason = "too many images (%s)" % counts["img"]
to_remove = True
elif counts["li"] > counts["p"] and tag != "ul" and tag != "ol":
reason = "more <li>s than <p>s"
to_remove = True
elif counts["input"] > (counts["p"] / 3):
reason = "less than 3x <p>s than <input>s"
to_remove = True
elif content_length < (MIN_LEN) and (counts["img"] == 0 or counts["img"] > 2):
reason = "too short content length %s without a single image" % content_length
to_remove = True
elif weight < 25 and link_density > 0.2:
reason = "too many links %.3f for its weight %s" % (link_density, weight)
to_remove = True
elif weight >= 25 and link_density > 0.5:
reason = "too many links %.3f for its weight %s" % (link_density, weight)
to_remove = True
elif (counts["embed"] == 1 and content_length < 75) or counts["embed"] > 1:
reason = "<embed>s with too short content length, or too many <embed>s"
to_remove = True
# if el.tag == 'div' and counts['img'] >= 1 and to_remove:
# imgs = el.findall('.//img')
# valid_img = False
# self.debug(tounicode(el))
# for img in imgs:
#
# height = img.get('height')
# text_length = img.get('text_length')
# self.debug ("height %s text_length %s" %(repr(height), repr(text_length)))
# if to_int(height) >= 100 or to_int(text_length) >= 100:
# valid_img = True
# self.debug("valid image" + tounicode(img))
# break
# if valid_img:
# to_remove = False
# self.debug("Allowing %s" %el.text_content())
# for desnode in self.tags(el, "table", "ul", "div"):
# allowed[desnode] = True
#find x non empty preceding and succeeding siblings
i, j = 0, 0
x = 1
siblings = []
for sib in el.itersiblings():
#self.debug(sib.text_content())
sib_content_length = text_length(sib)
if sib_content_length:
                        i += 1
siblings.append(sib_content_length)
if i == x:
break
for sib in el.itersiblings(preceding=True):
#self.debug(sib.text_content())
sib_content_length = text_length(sib)
if sib_content_length:
                        j += 1
siblings.append(sib_content_length)
if j == x:
break
#self.debug(str(siblings))
if siblings and sum(siblings) > 1000 :
to_remove = False
self.debug("Allowing %s" % describe(el))
for desnode in self.tags(el, "table", "ul", "div"):
allowed[desnode] = True
if to_remove:
self.debug("Cleaned %6.3f %s with weight %s cause it has %s." %
(content_score, describe(el), weight, reason))
#print tounicode(el)
#self.debug("pname %s pweight %.3f" %(pname, pweight))
el.drop_tree()
return clean_attributes(tounicode(node))
def option_parser():
from calibre.utils.config import OptionParser
parser = OptionParser(usage='%prog: [options] file')
parser.add_option('-v', '--verbose', default=False, action='store_true',
dest='verbose',
help='Show detailed output information. Useful for debugging')
parser.add_option('-k', '--keep-elements', default=None, action='store',
dest='keep_elements',
help='XPath specifying elements that should not be removed')
return parser
def main():
from calibre.utils.logging import default_log
parser = option_parser()
options, args = parser.parse_args()
if len(args) != 1:
parser.print_help()
raise SystemExit(1)
with open(args[0], 'rb') as f:
raw = f.read()
enc = sys.__stdout__.encoding or 'utf-8'
if options.verbose:
default_log.filter_level = default_log.DEBUG
print (Document(raw, default_log,
debug=options.verbose,
keep_elements=options.keep_elements).summary().encode(enc,
'replace'))
if __name__ == '__main__':
main()
|
bigdatauniversity/edx-platform
|
refs/heads/master
|
lms/djangoapps/course_blocks/transformers/start_date.py
|
32
|
"""
Start Date Transformer implementation.
"""
from openedx.core.lib.block_cache.transformer import BlockStructureTransformer
from lms.djangoapps.courseware.access_utils import check_start_date
from xmodule.course_metadata_utils import DEFAULT_START_DATE
from .utils import get_field_on_block
class StartDateTransformer(BlockStructureTransformer):
"""
A transformer that enforces the 'start' and 'days_early_for_beta'
fields on blocks by removing blocks from the block structure for
which the user does not have access. The 'start' field on a
block is percolated down to its descendants, so that all blocks
enforce the 'start' field from their ancestors. The assumed
'start' value for a block is then the maximum of its parent and its
own.
For a block with multiple parents, the assumed parent start date
value is a computed minimum of the start dates of all its parents.
So as long as one parent chain allows access, the block has access.
Staff users are exempted from visibility rules.
"""
VERSION = 1
MERGED_START_DATE = 'merged_start_date'
@classmethod
def name(cls):
"""
Unique identifier for the transformer's class;
same identifier used in setup.py.
"""
return "start_date"
@classmethod
def get_merged_start_date(cls, block_structure, block_key):
"""
Returns the merged value for the start date for the block with
the given block_key in the given block_structure.
"""
return block_structure.get_transformer_block_field(
block_key, cls, cls.MERGED_START_DATE, False
)
@classmethod
def collect(cls, block_structure):
"""
Collects any information that's necessary to execute this
transformer's transform method.
"""
block_structure.request_xblock_fields('days_early_for_beta')
for block_key in block_structure.topological_traversal():
# compute merged value of start date from all parents
parents = block_structure.get_parents(block_key)
min_all_parents_start_date = min(
cls.get_merged_start_date(block_structure, parent_key)
for parent_key in parents
) if parents else None
# set the merged value for this block
block_start = get_field_on_block(block_structure.get_xblock(block_key), 'start')
if min_all_parents_start_date is None:
# no parents so just use value on block or default
merged_start_value = block_start or DEFAULT_START_DATE
elif not block_start:
# no value on this block so take value from parents
merged_start_value = min_all_parents_start_date
else:
# max of merged-start-from-all-parents and this block
merged_start_value = max(min_all_parents_start_date, block_start)
block_structure.set_transformer_block_field(
block_key,
cls,
cls.MERGED_START_DATE,
merged_start_value
)
def transform(self, usage_info, block_structure):
"""
Mutates block_structure based on the given usage_info.
"""
# Users with staff access bypass the Start Date check.
if usage_info.has_staff_access:
return
block_structure.remove_block_if(
lambda block_key: not check_start_date(
usage_info.user,
block_structure.get_xblock_field(block_key, 'days_early_for_beta'),
self.get_merged_start_date(block_structure, block_key),
usage_info.course_key,
)
)
|
hdinsight/hue
|
refs/heads/master
|
desktop/core/ext-py/pycrypto-2.6.1/lib/Crypto/SelfTest/Cipher/test_ARC4.py
|
117
|
# -*- coding: utf-8 -*-
#
# SelfTest/Cipher/ARC4.py: Self-test for the Alleged-RC4 cipher
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test suite for Crypto.Cipher.ARC4"""
__revision__ = "$Id$"
from Crypto.Util.py3compat import *
# This is a list of (plaintext, ciphertext, key[, description]) tuples.
test_data = [
# Test vectors from Eric Rescorla's message with the subject
# "RC4 compatibility testing", sent to the cipherpunks mailing list on
# September 13, 1994.
# http://cypherpunks.venona.com/date/1994/09/msg00420.html
('0123456789abcdef', '75b7878099e0c596', '0123456789abcdef',
'Test vector 0'),
('0000000000000000', '7494c2e7104b0879', '0123456789abcdef',
'Test vector 1'),
('0000000000000000', 'de188941a3375d3a', '0000000000000000',
'Test vector 2'),
('00000000000000000000', 'd6a141a7ec3c38dfbd61', 'ef012345',
'Test vector 3'),
('01' * 512,
'7595c3e6114a09780c4ad452338e1ffd9a1be9498f813d76533449b6778dcad8'
+ 'c78a8d2ba9ac66085d0e53d59c26c2d1c490c1ebbe0ce66d1b6b1b13b6b919b8'
+ '47c25a91447a95e75e4ef16779cde8bf0a95850e32af9689444fd377108f98fd'
+ 'cbd4e726567500990bcc7e0ca3c4aaa304a387d20f3b8fbbcd42a1bd311d7a43'
+ '03dda5ab078896ae80c18b0af66dff319616eb784e495ad2ce90d7f772a81747'
+ 'b65f62093b1e0db9e5ba532fafec47508323e671327df9444432cb7367cec82f'
+ '5d44c0d00b67d650a075cd4b70dedd77eb9b10231b6b5b741347396d62897421'
+ 'd43df9b42e446e358e9c11a9b2184ecbef0cd8e7a877ef968f1390ec9b3d35a5'
+ '585cb009290e2fcde7b5ec66d9084be44055a619d9dd7fc3166f9487f7cb2729'
+ '12426445998514c15d53a18c864ce3a2b7555793988126520eacf2e3066e230c'
+ '91bee4dd5304f5fd0405b35bd99c73135d3d9bc335ee049ef69b3867bf2d7bd1'
+ 'eaa595d8bfc0066ff8d31509eb0c6caa006c807a623ef84c3d33c195d23ee320'
+ 'c40de0558157c822d4b8c569d849aed59d4e0fd7f379586b4b7ff684ed6a189f'
+ '7486d49b9c4bad9ba24b96abf924372c8a8fffb10d55354900a77a3db5f205e1'
+ 'b99fcd8660863a159ad4abe40fa48934163ddde542a6585540fd683cbfd8c00f'
+ '12129a284deacc4cdefe58be7137541c047126c8d49e2755ab181ab7e940b0c0',
'0123456789abcdef',
"Test vector 4"),
]
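# Test vector 0 above can be reproduced by hand with the same Crypto.Cipher.ARC4
# API that make_stream_tests() exercises, e.g.:
#     from binascii import hexlify, unhexlify
#     from Crypto.Cipher import ARC4
#     cipher = ARC4.new(unhexlify('0123456789abcdef'))
#     hexlify(cipher.encrypt(unhexlify('0123456789abcdef')))  # -> '75b7878099e0c596'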
def get_tests(config={}):
from Crypto.Cipher import ARC4
from common import make_stream_tests
return make_stream_tests(ARC4, "ARC4", test_data)
if __name__ == '__main__':
import unittest
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
|
mlperf/training_results_v0.6
|
refs/heads/master
|
Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/onnx-tensorrt/third_party/onnx/onnx/test/test_backend_test.py
|
1
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import itertools
import os
import unittest
import onnx.backend.base
import onnx.backend.test
from onnx.backend.base import Device, DeviceType
from onnx.backend.test.runner import BackendIsNotSupposedToImplementIt
import onnx.shape_inference
from typing import Optional, Text, Any, Tuple, Sequence
from onnx import NodeProto, ModelProto, TensorProto
import numpy # type: ignore
# The following just executes the fake backend through the backend test
# infrastructure. Since we don't have full reference implementation of all ops
# in ONNX repo, it's impossible to produce the proper results. However, we can
# run 'checker' (that's what base Backend class does) to verify that all tests
# fed are actually well-formed ONNX models.
#
# If everything is fine, all the tests would be marked as "skipped".
#
# We don't enable report in this test because the report collection logic itself
# fails when models are mal-formed.
class DummyBackend(onnx.backend.base.Backend):
@classmethod
def prepare(cls,
model, # type: ModelProto
device='CPU', # type: Text
**kwargs # type: Any
): # type: (...) -> Optional[onnx.backend.base.BackendRep]
super(DummyBackend, cls).prepare(model, device, **kwargs)
# test shape inference
model = onnx.shape_inference.infer_shapes(model)
value_infos = {vi.name: vi for vi in itertools.chain(model.graph.value_info, model.graph.output)}
if do_enforce_shape_inference_coverage(model):
for node in model.graph.node:
for i, output in enumerate(node.output):
if node.op_type == 'Dropout' and i != 0:
continue
assert output in value_infos
tt = value_infos[output].type.tensor_type
assert tt.elem_type != TensorProto.UNDEFINED
for dim in tt.shape.dim:
assert dim.WhichOneof('value') == 'dim_value'
raise BackendIsNotSupposedToImplementIt(
"This is the dummy backend test that doesn't verify the results but does run the checker")
@classmethod
def run_node(cls,
node, # type: NodeProto
inputs, # type: Any
device='CPU', # type: Text
outputs_info=None, # type: Optional[Sequence[Tuple[numpy.dtype, Tuple[int, ...]]]]
**kwargs # type: Any
): # type: (...) -> Optional[Tuple[Any, ...]]
super(DummyBackend, cls).run_node(node, inputs, device=device, outputs_info=outputs_info)
raise BackendIsNotSupposedToImplementIt(
"This is the dummy backend test that doesn't verify the results but does run the checker")
@classmethod
def supports_device(cls, device): # type: (Text) -> bool
d = Device(device)
if d.type == DeviceType.CPU:
return True
return False
shape_coverage_whitelist = set(
['bvlc_alexnet', 'densenet121', 'inception_v1', 'inception_v2',
'resnet50', 'shufflenet', 'SingleRelu', 'squeezenet_old', 'vgg19', 'zfnet'])
def do_enforce_shape_inference_coverage(model): # type: (ModelProto) -> bool
if model.graph.name not in shape_coverage_whitelist:
return False
for node in model.graph.node:
if node.op_type in set(['RNN', 'LSTM', 'GRU']):
return False
return True
backend_test = onnx.backend.test.BackendTest(DummyBackend, __name__)
if os.getenv('APPVEYOR'):
backend_test.exclude(r'(test_vgg19|test_zfnet)')
# import all test cases at global scope to make them visible to python.unittest
globals().update(backend_test.test_cases)
if __name__ == '__main__':
unittest.main()
|
meidli/yabgp
|
refs/heads/master
|
yabgp/config.py
|
2
|
# Copyright 2015 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" basic config """
import logging
import sys
from oslo_config import cfg
CONF = cfg.CONF
BGP_CONFIG_OPTS = [
cfg.IntOpt('peer_start_interval',
default=10,
help='The interval to start each BGP peer'),
    cfg.BoolOpt('four_bytes_as',
                default=True,
                help='Whether 4-byte AS numbers are supported'),
    cfg.BoolOpt('route_refresh',
                default=True,
                help='Whether sending and receiving route refresh messages is supported'),
    cfg.BoolOpt('cisco_route_refresh',
                default=True,
                help='Whether sending and receiving Cisco route refresh messages is supported'),
    cfg.BoolOpt('enhanced_route_refresh',
                default=True,
                help='Whether enhanced route refresh is supported'),
cfg.StrOpt('add_path',
choices=['ipv4_send', 'ipv4_receive', 'ipv4_both'],
help='BGP additional path feature and supported address family'),
    cfg.BoolOpt('graceful_restart',
                default=True,
                help='Whether graceful restart is supported'),
    cfg.BoolOpt('cisco_multi_session',
                default=True,
                help='Whether Cisco multi-session is supported'),
cfg.DictOpt('running_config',
default={},
help='The running configuration for BGP')
]
CONF.register_opts(BGP_CONFIG_OPTS, group='bgp')
BGP_PEER_CONFIG_OPTS = [
cfg.IntOpt('remote_as',
help='The remote BGP peer AS number'),
cfg.IntOpt('local_as',
help='The Local BGP AS number'),
cfg.IPOpt('remote_addr',
help='The remote address of the peer'),
cfg.IPOpt('local_addr',
default='0.0.0.0',
              help='The local address of the BGP speaker'),
    cfg.StrOpt('md5',
               help='The MD5 string used for authentication',
secret=True),
cfg.ListOpt('afi_safi',
default=['ipv4'],
help='The Global config for address family and sub address family'),
cfg.BoolOpt(
'rib',
default=False,
help='maintain rib in or not, default is False'
)
]
CONF.register_cli_opts(BGP_PEER_CONFIG_OPTS, group='bgp')
BGP_PEER_TIME_OPTS = [
cfg.IntOpt('connect_retry_time',
default=30,
help='Connect retry timer'),
cfg.IntOpt('hold_time',
default=180,
help='Hold timer'),
cfg.IntOpt('keep_alive_time',
default=60,
help='Keepalive timer'),
cfg.IntOpt('delay_open_time',
default=10,
help='Delay open timer'),
cfg.IntOpt('idle_hold_time',
default=30,
help='Idle hold timer')
]
CONF.register_cli_opts(BGP_PEER_TIME_OPTS, group='time')
LOG = logging.getLogger(__name__)
def get_bgp_config():
"""
Get BGP running config
:return:
"""
# check bgp configuration from CLI input
LOG.info('Try to load BGP configuration from CLI input')
if CONF.bgp.local_as and CONF.bgp.remote_as and CONF.bgp.local_addr and CONF.bgp.remote_addr:
CONF.bgp.running_config = {
'remote_as': CONF.bgp.remote_as,
'remote_addr': CONF.bgp.remote_addr,
'local_as': CONF.bgp.local_as,
'local_addr': CONF.bgp.local_addr,
'md5': CONF.bgp.md5,
'afi_safi': CONF.bgp.afi_safi,
'capability': {
'local': {
'four_bytes_as': CONF.bgp.four_bytes_as,
'route_refresh': CONF.bgp.route_refresh,
'cisco_route_refresh': CONF.bgp.cisco_route_refresh,
'enhanced_route_refresh': CONF.bgp.enhanced_route_refresh,
'graceful_restart': CONF.bgp.graceful_restart,
'cisco_multi_session': CONF.bgp.cisco_multi_session,
'add_path': CONF.bgp.add_path},
'remote': {}
}
}
else:
LOG.error('Please provide enough parameters!')
sys.exit()
|
fosfataza/protwis
|
refs/heads/master
|
contactnetwork/forms.py
|
6
|
from django import forms
class PDBform(forms.Form):
pdbname = forms.CharField(max_length=10, required=False)
file = forms.FileField(label='Select a file', help_text='max. 42 megabytes', required=False)
|
Taapat/enigma2-openpli-vuplus
|
refs/heads/master
|
RecordTimer.py
|
6
|
import os
from enigma import eEPGCache, getBestPlayableServiceReference, eStreamServer, eServiceReference, iRecordableService, quitMainloop, eActionMap, setPreferredTuner
from Components.config import config
from Components.UsageConfig import defaultMoviePath
from Components.SystemInfo import SystemInfo
from Components.TimerSanityCheck import TimerSanityCheck
from Screens.MessageBox import MessageBox
import Screens.Standby
import Screens.InfoBar
from Tools import Directories, Notifications, ASCIItranslit, Trashcan
from Tools.XMLTools import stringToXML
import timer
import xml.etree.cElementTree
import NavigationInstance
from ServiceReference import ServiceReference
from time import localtime, strftime, ctime, time
from bisect import insort
from sys import maxint
# ok, for descriptions etc we have:
# service reference (to get the service name)
# name (title)
# description (description)
# event data (ONLY for time adjustments etc.)
# parses an event, and gives out a (begin, end, name, description, eit)-tuple.
# begin and end will be corrected
def parseEvent(ev, description = True):
if description:
name = ev.getEventName()
description = ev.getShortDescription()
if description == "":
description = ev.getExtendedDescription()
else:
name = ""
description = ""
begin = ev.getBeginTime()
end = begin + ev.getDuration()
eit = ev.getEventId()
begin -= config.recording.margin_before.value * 60
end += config.recording.margin_after.value * 60
return (begin, end, name, description, eit)
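# e.g. with a 5 minute margin_before and a 10 minute margin_after, an event
# running 20:00-21:00 is returned as begin=19:55 and end=21:10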
class AFTEREVENT:
NONE = 0
STANDBY = 1
DEEPSTANDBY = 2
AUTO = 3
def findSafeRecordPath(dirname):
if not dirname:
return None
from Components import Harddisk
dirname = os.path.realpath(dirname)
mountpoint = Harddisk.findMountPoint(dirname)
if mountpoint in ('/', '/media'):
print '[RecordTimer] media is not mounted:', dirname
return None
if not os.path.isdir(dirname):
try:
os.makedirs(dirname)
except Exception, ex:
print '[RecordTimer] Failed to create dir "%s":' % dirname, ex
return None
return dirname
def checkForRecordings():
if NavigationInstance.instance.getRecordings():
return True
rec_time = NavigationInstance.instance.RecordTimer.getNextTimerTime(isWakeup=True)
return rec_time > 0 and (rec_time - time()) < 360
# please do not translate log messages
class RecordTimerEntry(timer.TimerEntry, object):
######### the following static methods and members are only in use when the box is in (soft) standby
wasInStandby = False
wasInDeepStandby = False
receiveRecordEvents = False
@staticmethod
def keypress(key=None, flag=1):
if flag and (RecordTimerEntry.wasInStandby or RecordTimerEntry.wasInDeepStandby):
RecordTimerEntry.wasInStandby = False
RecordTimerEntry.wasInDeepStandby = False
eActionMap.getInstance().unbindAction('', RecordTimerEntry.keypress)
@staticmethod
def setWasInDeepStandby():
RecordTimerEntry.wasInDeepStandby = True
eActionMap.getInstance().bindAction('', -maxint - 1, RecordTimerEntry.keypress)
@staticmethod
def setWasInStandby():
if not RecordTimerEntry.wasInStandby:
if not RecordTimerEntry.wasInDeepStandby:
eActionMap.getInstance().bindAction('', -maxint - 1, RecordTimerEntry.keypress)
RecordTimerEntry.wasInDeepStandby = False
RecordTimerEntry.wasInStandby = True
@staticmethod
def shutdown():
quitMainloop(1)
@staticmethod
def staticGotRecordEvent(recservice, event):
if event == iRecordableService.evEnd:
print "RecordTimer.staticGotRecordEvent(iRecordableService.evEnd)"
if not checkForRecordings():
print "No recordings busy of sceduled within 6 minutes so shutdown"
RecordTimerEntry.shutdown() # immediate shutdown
elif event == iRecordableService.evStart:
print "RecordTimer.staticGotRecordEvent(iRecordableService.evStart)"
@staticmethod
def stopTryQuitMainloop():
print "RecordTimer.stopTryQuitMainloop"
NavigationInstance.instance.record_event.remove(RecordTimerEntry.staticGotRecordEvent)
RecordTimerEntry.receiveRecordEvents = False
@staticmethod
def TryQuitMainloop():
if not RecordTimerEntry.receiveRecordEvents and Screens.Standby.inStandby:
print "RecordTimer.TryQuitMainloop"
NavigationInstance.instance.record_event.append(RecordTimerEntry.staticGotRecordEvent)
RecordTimerEntry.receiveRecordEvents = True
			# send a fake event to check whether other recordings are running or
# other timers start in a few seconds
RecordTimerEntry.staticGotRecordEvent(None, iRecordableService.evEnd)
#################################################################
def __init__(self, serviceref, begin, end, name, description, eit, disabled = False, justplay = False, afterEvent = AFTEREVENT.AUTO, checkOldTimers = False, dirname = None, tags = None, descramble = True, record_ecm = False, always_zap = False, zap_wakeup = "always", rename_repeat = True, conflict_detection = True):
timer.TimerEntry.__init__(self, int(begin), int(end))
if checkOldTimers:
if self.begin < time() - 1209600:
self.begin = int(time())
if self.end < self.begin:
self.end = self.begin
assert isinstance(serviceref, ServiceReference)
if serviceref and serviceref.isRecordable():
self.service_ref = serviceref
else:
self.service_ref = ServiceReference(None)
self.eit = eit
self.dontSave = False
self.name = name
self.description = description
self.disabled = disabled
self.timer = None
self.__record_service = None
self.start_prepare = 0
self.justplay = justplay
self.always_zap = always_zap
self.zap_wakeup = zap_wakeup
self.afterEvent = afterEvent
self.dirname = dirname
self.dirnameHadToFallback = False
self.autoincrease = False
self.autoincreasetime = 3600 * 24 # 1 day
self.tags = tags or []
self.descramble = descramble
self.record_ecm = record_ecm
self.rename_repeat = rename_repeat
self.conflict_detection = conflict_detection
self.setAdvancedPriorityFrontend = None
if SystemInfo["DVB-T_priority_tuner_available"] or SystemInfo["DVB-C_priority_tuner_available"] or SystemInfo["DVB-S_priority_tuner_available"] or SystemInfo["ATSC_priority_tuner_available"]:
rec_ref = self.service_ref and self.service_ref.ref
str_service = rec_ref and rec_ref.toString()
if str_service and '%3a//' not in str_service and not str_service.rsplit(":", 1)[1].startswith("/"):
type_service = rec_ref.getUnsignedData(4) >> 16
if type_service == 0xEEEE:
if SystemInfo["DVB-T_priority_tuner_available"] and config.usage.recording_frontend_priority_dvbt.value != "-2":
if config.usage.recording_frontend_priority_dvbt.value != config.usage.frontend_priority.value:
self.setAdvancedPriorityFrontend = config.usage.recording_frontend_priority_dvbt.value
if SystemInfo["ATSC_priority_tuner_available"] and config.usage.recording_frontend_priority_atsc.value != "-2":
if config.usage.recording_frontend_priority_atsc.value != config.usage.frontend_priority.value:
self.setAdvancedPriorityFrontend = config.usage.recording_frontend_priority_atsc.value
elif type_service == 0xFFFF:
if SystemInfo["DVB-C_priority_tuner_available"] and config.usage.recording_frontend_priority_dvbc.value != "-2":
if config.usage.recording_frontend_priority_dvbc.value != config.usage.frontend_priority.value:
self.setAdvancedPriorityFrontend = config.usage.recording_frontend_priority_dvbc.value
if SystemInfo["ATSC_priority_tuner_available"] and config.usage.recording_frontend_priority_atsc.value != "-2":
if config.usage.recording_frontend_priority_atsc.value != config.usage.frontend_priority.value:
self.setAdvancedPriorityFrontend = config.usage.recording_frontend_priority_atsc.value
else:
if SystemInfo["DVB-S_priority_tuner_available"] and config.usage.recording_frontend_priority_dvbs.value != "-2":
if config.usage.recording_frontend_priority_dvbs.value != config.usage.frontend_priority.value:
self.setAdvancedPriorityFrontend = config.usage.recording_frontend_priority_dvbs.value
self.needChangePriorityFrontend = self.setAdvancedPriorityFrontend is not None or config.usage.recording_frontend_priority.value != "-2" and config.usage.recording_frontend_priority.value != config.usage.frontend_priority.value
self.change_frontend = False
self.InfoBarInstance = Screens.InfoBar.InfoBar.instance
self.ts_dialog = None
self.log_entries = []
self.flags = set()
self.resetState()
def __repr__(self):
return "RecordTimerEntry(name=%s, begin=%s, serviceref=%s, justplay=%s)" % (self.name, ctime(self.begin), self.service_ref, self.justplay)
def log(self, code, msg):
self.log_entries.append((int(time()), code, msg))
print "[TIMER]", msg
def calculateFilename(self, name=None):
service_name = self.service_ref.getServiceName()
begin_date = strftime("%Y%m%d %H%M", localtime(self.begin))
name = name or self.name
filename = begin_date + " - " + service_name
if name:
if config.recording.filename_composition.value == "short":
filename = strftime("%Y%m%d", localtime(self.begin)) + " - " + name
elif config.recording.filename_composition.value == "long":
filename += " - " + name + " - " + self.description
else:
filename += " - " + name # standard
if config.recording.ascii_filenames.value:
filename = ASCIItranslit.legacyEncode(filename)
if not self.dirname:
dirname = findSafeRecordPath(defaultMoviePath())
else:
dirname = findSafeRecordPath(self.dirname)
if dirname is None:
dirname = findSafeRecordPath(defaultMoviePath())
self.dirnameHadToFallback = True
if not dirname:
return None
self.Filename = Directories.getRecordingFilename(filename, dirname)
self.log(0, "Filename calculated as: '%s'" % self.Filename)
return self.Filename
def tryPrepare(self):
if self.justplay:
return True
else:
if not self.calculateFilename():
self.do_backoff()
self.start_prepare = time() + self.backoff
return False
rec_ref = self.service_ref and self.service_ref.ref
if rec_ref and rec_ref.flags & eServiceReference.isGroup:
rec_ref = getBestPlayableServiceReference(rec_ref, eServiceReference())
if not rec_ref:
self.log(1, "'get best playable service for group... record' failed")
return False
self.setRecordingPreferredTuner()
self.record_service = rec_ref and NavigationInstance.instance.recordService(rec_ref)
if not self.record_service:
self.log(1, "'record service' failed")
self.setRecordingPreferredTuner(setdefault=True)
return False
name = self.name
description = self.description
if self.repeated:
epgcache = eEPGCache.getInstance()
queryTime=self.begin+(self.end-self.begin)/2
evt = epgcache.lookupEventTime(rec_ref, queryTime)
if evt:
if self.rename_repeat:
event_description = evt.getShortDescription()
if not event_description:
event_description = evt.getExtendedDescription()
if event_description and event_description != description:
description = event_description
event_name = evt.getEventName()
if event_name and event_name != name:
name = event_name
if not self.calculateFilename(event_name):
self.do_backoff()
self.start_prepare = time() + self.backoff
return False
event_id = evt.getEventId()
else:
event_id = -1
else:
event_id = self.eit
if event_id is None:
event_id = -1
prep_res=self.record_service.prepare(self.Filename + self.record_service.getFilenameExtension(), self.begin, self.end, event_id, name.replace("\n", ""), description.replace("\n", ""), ' '.join(self.tags), bool(self.descramble), bool(self.record_ecm))
if prep_res:
if prep_res == -255:
self.log(4, "failed to write meta information")
else:
self.log(2, "'prepare' failed: error %d" % prep_res)
			# we must calculate the new start time before the stopRecordService call because in Screens/Standby.py TryQuitMainloop tries to get
# the next start time in evEnd event handler...
self.do_backoff()
self.start_prepare = time() + self.backoff
NavigationInstance.instance.stopRecordService(self.record_service)
self.record_service = None
self.setRecordingPreferredTuner(setdefault=True)
return False
return True
def do_backoff(self):
if self.backoff == 0:
self.backoff = 5
else:
self.backoff *= 2
if self.backoff > 100:
self.backoff = 100
self.log(10, "backoff: retry in %d seconds" % self.backoff)
def activate(self):
next_state = self.state + 1
self.log(5, "activating state %d" % next_state)
if next_state == 1:
if self.always_zap:
if Screens.Standby.inStandby:
self.log(5, "wakeup and zap to recording service")
RecordTimerEntry.setWasInStandby()
#set service to zap after standby
Screens.Standby.inStandby.prev_running_service = self.service_ref.ref
Screens.Standby.inStandby.paused_service = None
#wakeup standby
Screens.Standby.inStandby.Power()
else:
if RecordTimerEntry.wasInDeepStandby:
RecordTimerEntry.setWasInStandby()
cur_zap_ref = NavigationInstance.instance.getCurrentlyPlayingServiceReference()
if cur_zap_ref and not cur_zap_ref.getPath():# we do not zap away if it is no live service
if self.checkingTimeshiftRunning():
if self.ts_dialog is None:
self.openChoiceActionBeforeZap()
else:
Notifications.AddNotification(MessageBox, _("In order to record a timer, the TV was switched to the recording service!\n"), type=MessageBox.TYPE_INFO, timeout=20)
self.setRecordingPreferredTuner()
self.failureCB(True)
self.log(5, "zap to recording service")
if next_state == self.StatePrepared:
if self.tryPrepare():
self.log(6, "prepare ok, waiting for begin")
# create file to "reserve" the filename
# because another recording at the same time on another service can try to record the same event
				# i.e. cable / sat... then the second recording needs its own extension... when we create the file
				# here then calculateFilename is happy
if not self.justplay:
open(self.Filename + self.record_service.getFilenameExtension(), "w").close()
# Give the Trashcan a chance to clean up
try:
Trashcan.instance.cleanIfIdle(self.Filename)
except Exception, e:
print "[TIMER] Failed to call Trashcan.instance.cleanIfIdle()"
print "[TIMER] Error:", e
# fine. it worked, resources are allocated.
self.next_activation = self.begin
self.backoff = 0
return True
self.log(7, "prepare failed")
if eStreamServer.getInstance().getConnectedClients():
eStreamServer.getInstance().stopStream()
return False
if self.first_try_prepare or (self.ts_dialog is not None and not self.checkingTimeshiftRunning()):
self.first_try_prepare = False
cur_ref = NavigationInstance.instance.getCurrentlyPlayingServiceReference()
if cur_ref and not cur_ref.getPath():
if self.always_zap:
return False
if Screens.Standby.inStandby:
self.setRecordingPreferredTuner()
self.failureCB(True)
elif self.checkingTimeshiftRunning():
if self.ts_dialog is None:
self.openChoiceActionBeforeZap()
elif not config.recording.asktozap.value:
self.log(8, "asking user to zap away")
Notifications.AddNotificationWithCallback(self.failureCB, MessageBox, _("A timer failed to record!\nDisable TV and try again?\n"), timeout=20, default=True)
else: # zap without asking
self.log(9, "zap without asking")
Notifications.AddNotification(MessageBox, _("In order to record a timer, the TV was switched to the recording service!\n"), type=MessageBox.TYPE_INFO, timeout=20)
self.setRecordingPreferredTuner()
self.failureCB(True)
elif cur_ref:
self.log(8, "currently running service is not a live service.. so stop it makes no sense")
else:
self.log(8, "currently no service running... so we dont need to stop it")
return False
elif next_state == self.StateRunning:
# if this timer has been cancelled, just go to "end" state.
if self.cancelled:
return True
if self.justplay:
if Screens.Standby.inStandby:
if RecordTimerEntry.wasInDeepStandby and self.zap_wakeup in ("always", "from_deep_standby") or self.zap_wakeup in ("always", "from_standby"):
self.log(11, "wakeup and zap")
RecordTimerEntry.setWasInStandby()
#set service to zap after standby
Screens.Standby.inStandby.prev_running_service = self.service_ref.ref
Screens.Standby.inStandby.paused_service = None
#wakeup standby
Screens.Standby.inStandby.Power()
else:
if RecordTimerEntry.wasInDeepStandby:
RecordTimerEntry.setWasInStandby()
if self.checkingTimeshiftRunning():
if self.ts_dialog is None:
self.openChoiceActionBeforeZap()
else:
self.log(11, "zapping")
NavigationInstance.instance.playService(self.service_ref.ref)
return True
else:
self.log(11, "start recording")
if RecordTimerEntry.wasInDeepStandby:
RecordTimerEntry.keypress()
if Screens.Standby.inStandby: #In case some plugin did put the receiver already in standby
config.misc.standbyCounter.value = 0
else:
Notifications.AddNotification(Screens.Standby.Standby, StandbyCounterIncrease=False)
record_res = self.record_service.start()
self.setRecordingPreferredTuner(setdefault=True)
if record_res:
self.log(13, "start record returned %d" % record_res)
self.do_backoff()
# retry
self.begin = time() + self.backoff
return False
# Tell the trashcan we started recording. The trashcan gets events,
# but cannot tell what the associated path is.
Trashcan.instance.markDirty(self.Filename)
return True
elif next_state == self.StateEnded:
old_end = self.end
self.ts_dialog = None
if self.setAutoincreaseEnd():
self.log(12, "autoincrase recording %d minute(s)" % int((self.end - old_end)/60))
self.state -= 1
return True
self.log(12, "stop recording")
if not self.justplay:
NavigationInstance.instance.stopRecordService(self.record_service)
self.record_service = None
if not checkForRecordings():
if self.afterEvent == AFTEREVENT.DEEPSTANDBY or self.afterEvent == AFTEREVENT.AUTO and (Screens.Standby.inStandby or RecordTimerEntry.wasInStandby) and not config.misc.standbyCounter.value:
if not Screens.Standby.inTryQuitMainloop:
if Screens.Standby.inStandby:
RecordTimerEntry.TryQuitMainloop()
else:
Notifications.AddNotificationWithCallback(self.sendTryQuitMainloopNotification, MessageBox, _("A finished record timer wants to shut down\nyour receiver. Shutdown now?"), timeout=20, default=True)
elif self.afterEvent == AFTEREVENT.STANDBY or self.afterEvent == AFTEREVENT.AUTO and RecordTimerEntry.wasInStandby:
if not Screens.Standby.inStandby:
Notifications.AddNotificationWithCallback(self.sendStandbyNotification, MessageBox, _("A finished record timer wants to set your\nreceiver to standby. Do that now?"), timeout=20, default=True)
else:
RecordTimerEntry.keypress()
return True
def setAutoincreaseEnd(self, entry = None):
if not self.autoincrease:
return False
if entry is None:
new_end = int(time()) + self.autoincreasetime
else:
new_end = entry.begin - 30
dummyentry = RecordTimerEntry(self.service_ref, self.begin, new_end, self.name, self.description, self.eit, disabled=True, justplay = self.justplay, afterEvent = self.afterEvent, dirname = self.dirname, tags = self.tags)
dummyentry.disabled = self.disabled
timersanitycheck = TimerSanityCheck(NavigationInstance.instance.RecordTimer.timer_list, dummyentry)
if not timersanitycheck.check():
simulTimerList = timersanitycheck.getSimulTimerList()
if simulTimerList is not None and len(simulTimerList) > 1:
new_end = simulTimerList[1].begin
new_end -= 30 # leave 30 seconds of prepare time
if new_end <= time():
return False
self.end = new_end
return True
def setRecordingPreferredTuner(self, setdefault=False):
if self.needChangePriorityFrontend:
elem = None
if not self.change_frontend and not setdefault:
elem = (self.setAdvancedPriorityFrontend is not None and self.setAdvancedPriorityFrontend) or config.usage.recording_frontend_priority.value
self.change_frontend = True
elif self.change_frontend and setdefault:
elem = config.usage.frontend_priority.value
self.change_frontend = False
self.setAdvancedPriorityFrontend = None
if elem is not None:
setPreferredTuner(int(elem))
def checkingTimeshiftRunning(self):
return config.usage.check_timeshift.value and self.InfoBarInstance and self.InfoBarInstance.timeshiftEnabled() and self.InfoBarInstance.timeshift_was_activated
def openChoiceActionBeforeZap(self):
if self.ts_dialog is None:
type = _("record")
if self.justplay:
type = _("zap")
elif self.always_zap:
type = _("zap and record")
message = _("You must switch to the service %s (%s - '%s')!\n") % (type, self.service_ref.getServiceName(), self.name)
if self.repeated:
message += _("Attention, this is repeated timer!\n")
message += _("Timeshift is running. Select an action.\n")
choice = [(_("Zap"), "zap"), (_("Don't zap and disable timer"), "disable"), (_("Don't zap and remove timer"), "remove")]
if not self.InfoBarInstance.save_timeshift_file:
choice.insert(1, (_("Save timeshift in movie dir and zap"), "save_movie"))
if self.InfoBarInstance.timeshiftActivated():
choice.insert(0, (_("Save timeshift and zap"), "save"))
else:
choice.insert(1, (_("Save timeshift and zap"), "save"))
else:
message += _("Reminder, you have chosen to save timeshift file.")
#if self.justplay or self.always_zap:
# choice.insert(2, (_("Don't zap"), "continue"))
choice.insert(2, (_("Don't zap"), "continue"))
def zapAction(choice):
start_zap = True
if choice:
if choice in ("zap", "save", "save_movie"):
self.log(8, "zap to recording service")
if choice in ("save", "save_movie"):
ts = self.InfoBarInstance.getTimeshift()
if ts and ts.isTimeshiftEnabled():
if choice =="save_movie":
self.InfoBarInstance.save_timeshift_in_movie_dir = True
self.InfoBarInstance.save_timeshift_file = True
ts.saveTimeshiftFile()
del ts
self.InfoBarInstance.saveTimeshiftFiles()
elif choice == "disable":
self.disable()
NavigationInstance.instance.RecordTimer.timeChanged(self)
start_zap = False
self.log(8, "zap canceled by the user, timer disabled")
elif choice == "remove":
start_zap = False
self.afterEvent = AFTEREVENT.NONE
NavigationInstance.instance.RecordTimer.removeEntry(self)
self.log(8, "zap canceled by the user, timer removed")
elif choice == "continue":
if self.justplay:
self.end = self.begin
start_zap = False
self.log(8, "zap canceled by the user")
if start_zap:
if not self.justplay:
self.setRecordingPreferredTuner()
self.failureCB(True)
else:
self.log(8, "zapping")
NavigationInstance.instance.playService(self.service_ref.ref)
self.ts_dialog = self.InfoBarInstance.session.openWithCallback(zapAction, MessageBox, message, simple=True, list=choice, timeout=20)
def sendStandbyNotification(self, answer):
RecordTimerEntry.keypress()
if answer:
Notifications.AddNotification(Screens.Standby.Standby)
def sendTryQuitMainloopNotification(self, answer):
RecordTimerEntry.keypress()
if answer:
Notifications.AddNotification(Screens.Standby.TryQuitMainloop, 1)
def getNextActivation(self):
if self.state == self.StateEnded:
return self.end
next_state = self.state + 1
return {self.StatePrepared: self.start_prepare,
self.StateRunning: self.begin,
self.StateEnded: self.end }[next_state]
def failureCB(self, answer):
self.ts_dialog = None
if answer:
self.log(13, "ok, zapped away")
#NavigationInstance.instance.stopUserServices()
NavigationInstance.instance.playService(self.service_ref.ref)
if not self.first_try_prepare and self.InfoBarInstance and hasattr(self.InfoBarInstance.session, 'pipshown') and self.InfoBarInstance.session.pipshown:
if hasattr(self.InfoBarInstance, "showPiP"):
self.InfoBarInstance.showPiP()
if hasattr(self.InfoBarInstance.session, 'pip'):
del self.InfoBarInstance.session.pip
self.InfoBarInstance.session.pipshown = False
else:
self.log(14, "user didn't want to zap away, record will probably fail")
def timeChanged(self):
old_prepare = self.start_prepare
self.start_prepare = self.begin - self.prepare_time
self.backoff = 0
if int(old_prepare) != int(self.start_prepare):
self.log(15, "record time changed, start prepare is now: %s" % ctime(self.start_prepare))
def gotRecordEvent(self, record, event):
# TODO: this is not working (never true), please fix. (comparing two swig wrapped ePtrs)
if self.__record_service.__deref__() != record.__deref__():
return
self.log(16, "record event %d" % event)
if event == iRecordableService.evRecordWriteError:
print "WRITE ERROR on recording, disk full?"
# show notification. the 'id' will make sure that it will be
# displayed only once, even if more timers are failing at the
# same time. (which is very likely in case of disk fullness)
Notifications.AddPopup(text = _("Write error while recording. Disk full?\n"), type = MessageBox.TYPE_ERROR, timeout = 0, id = "DiskFullMessage")
# ok, the recording has been stopped. we need to properly note
# that in our state, with also keeping the possibility to re-try.
# TODO: this has to be done.
elif event == iRecordableService.evStart:
text = _("A record has been started:\n%s") % self.name
notify = config.usage.show_message_when_recording_starts.value and not Screens.Standby.inStandby and self.InfoBarInstance and self.InfoBarInstance.execing
if self.dirnameHadToFallback:
text = '\n'.join((text, _("Please note that the previously selected media could not be accessed and therefore the default directory is being used instead.")))
notify = True
if notify:
Notifications.AddPopup(text = text, type = MessageBox.TYPE_INFO, timeout = 3)
elif event == iRecordableService.evRecordAborted:
NavigationInstance.instance.RecordTimer.removeEntry(self)
elif event == iRecordableService.evGstRecordEnded:
if self.repeated:
self.processRepeated(findRunningEvent = False)
NavigationInstance.instance.RecordTimer.doActivate(self)
# we have record_service as property to automatically subscribe to record service events
def setRecordService(self, service):
if self.__record_service is not None:
print "[remove callback]"
NavigationInstance.instance.record_event.remove(self.gotRecordEvent)
self.__record_service = service
if self.__record_service is not None:
print "[add callback]"
NavigationInstance.instance.record_event.append(self.gotRecordEvent)
record_service = property(lambda self: self.__record_service, setRecordService)
def createTimer(xml):
begin = int(xml.get("begin"))
end = int(xml.get("end"))
serviceref = ServiceReference(xml.get("serviceref").encode("utf-8"))
description = xml.get("description").encode("utf-8")
repeated = xml.get("repeated").encode("utf-8")
rename_repeat = long(xml.get("rename_repeat") or "1")
disabled = long(xml.get("disabled") or "0")
justplay = long(xml.get("justplay") or "0")
always_zap = long(xml.get("always_zap") or "0")
zap_wakeup = str(xml.get("zap_wakeup") or "always")
conflict_detection = long(xml.get("conflict_detection") or "1")
afterevent = str(xml.get("afterevent") or "nothing")
afterevent = {
"nothing": AFTEREVENT.NONE,
"standby": AFTEREVENT.STANDBY,
"deepstandby": AFTEREVENT.DEEPSTANDBY,
"auto": AFTEREVENT.AUTO
}[afterevent]
eit = xml.get("eit")
if eit and eit != "None":
eit = long(eit)
else:
eit = None
location = xml.get("location")
if location and location != "None":
location = location.encode("utf-8")
else:
location = None
tags = xml.get("tags")
if tags and tags != "None":
tags = tags.encode("utf-8").split(' ')
else:
tags = None
descramble = int(xml.get("descramble") or "1")
record_ecm = int(xml.get("record_ecm") or "0")
name = xml.get("name").encode("utf-8")
#filename = xml.get("filename").encode("utf-8")
entry = RecordTimerEntry(serviceref, begin, end, name, description, eit, disabled, justplay, afterevent, dirname = location, tags = tags, descramble = descramble, record_ecm = record_ecm, always_zap = always_zap, zap_wakeup = zap_wakeup, rename_repeat = rename_repeat, conflict_detection = conflict_detection)
entry.repeated = int(repeated)
flags = xml.get("flags")
if flags:
entry.flags = set(flags.encode("utf-8").split(' '))
for l in xml.findall("log"):
time = int(l.get("time"))
code = int(l.get("code"))
msg = l.text.strip().encode("utf-8")
entry.log_entries.append((time, code, msg))
return entry
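# Illustrative example (not from the original file): a minimal <timer> element
# of the kind createTimer() parses and RecordTimer.saveTimer() writes below.
# All attribute values here are made up.
#
# <timer begin="1400000000" end="1400003600"
#        serviceref="1:0:19:283D:3FB:1:C00000:0:0:0:"
#        name="Example" description="" repeated="0" afterevent="nothing"
#        eit="12345" disabled="0" justplay="0" always_zap="0"
#        zap_wakeup="always" rename_repeat="1" conflict_detection="1"
#        descramble="1" record_ecm="0">
#     <log code="15" time="1399999000">record time changed, start prepare is now: ...</log>
# </timer>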
class RecordTimer(timer.Timer):
def __init__(self):
timer.Timer.__init__(self)
self.Filename = Directories.resolveFilename(Directories.SCOPE_CONFIG, "timers.xml")
try:
self.loadTimer()
except IOError:
print "unable to load timers from file!"
def doActivate(self, w):
# when activating a timer which has already passed,
# simply abort the timer. don't run through all the stages.
if w.shouldSkip():
w.state = RecordTimerEntry.StateEnded
else:
# when active returns true, this means "accepted".
# otherwise, the current state is kept.
# the timer entry itself will fix up the delay then.
if w.activate():
w.state += 1
self.timer_list.remove(w)
# did this timer reach the last state?
if w.state < RecordTimerEntry.StateEnded:
# no, sort it into active list
insort(self.timer_list, w)
else:
# yes. Process repeated, and re-add.
if w.repeated:
w.processRepeated()
w.state = RecordTimerEntry.StateWaiting
w.first_try_prepare = True
self.addTimerEntry(w)
else:
# correct wrong running timers
self.checkWrongRunningTimers()
# check for disabled timers whose time has passed and set them to completed
self.cleanupDisabled()
# Remove old timers as set in config
self.cleanupDaily(config.recording.keep_timers.value)
# If we keep finished timers for a while, insert this one into the processed timers list
if config.recording.keep_timers.value > 0 and w not in self.processed_timers:
insort(self.processed_timers, w)
self.stateChanged(w)
def checkWrongRunningTimers(self):
now = time() + 100
if int(now) > 1072224000: # sanity check: only act if the system clock looks valid (timestamp is late Dec 2003)
wrong_timers = [entry for entry in (self.processed_timers + self.timer_list) if entry.state in (1, 2) and entry.begin > now]
for timer in wrong_timers:
timer.state = RecordTimerEntry.StateWaiting
self.timeChanged(timer)
def isRecording(self):
for timer in self.timer_list:
if timer.isRunning() and not timer.justplay:
return True
return False
def loadTimer(self):
try:
doc = xml.etree.cElementTree.parse(self.Filename)
except SyntaxError:
from Tools.Notifications import AddPopup
from Screens.MessageBox import MessageBox
AddPopup(_("The timer file (timers.xml) is corrupt and could not be loaded."), type = MessageBox.TYPE_ERROR, timeout = 0, id = "TimerLoadFailed")
print "timers.xml failed to load!"
try:
import os
os.rename(self.Filename, self.Filename + "_old")
except (IOError, OSError):
print "renaming broken timer failed"
return
except IOError:
print "timers.xml not found!"
return
root = doc.getroot()
checkit = False
timer_text = ""
for timer in root.findall("timer"):
newTimer = createTimer(timer)
conflict_list = self.record(newTimer, ignoreTSC=True, dosave=False, loadtimer=True)
if conflict_list:
checkit = True
if newTimer in conflict_list:
timer_text += _("\nTimer '%s' disabled!") % newTimer.name
if checkit:
from Tools.Notifications import AddPopup
from Screens.MessageBox import MessageBox
AddPopup(_("Timer overlap in timers.xml detected!\nPlease recheck it!") + timer_text, type = MessageBox.TYPE_ERROR, timeout = 0, id = "TimerLoadFailed")
def saveTimer(self):
#root_element = xml.etree.cElementTree.Element('timers')
#root_element.text = "\n"
#for timer in self.timer_list + self.processed_timers:
# some timers (instant records) don't want to be saved.
# skip them
#if timer.dontSave:
#continue
#t = xml.etree.cElementTree.SubElement(root_element, 'timers')
#t.set("begin", str(int(timer.begin)))
#t.set("end", str(int(timer.end)))
#t.set("serviceref", str(timer.service_ref))
#t.set("repeated", str(timer.repeated))
#t.set("name", timer.name)
#t.set("description", timer.description)
#t.set("afterevent", str({
# AFTEREVENT.NONE: "nothing",
# AFTEREVENT.STANDBY: "standby",
# AFTEREVENT.DEEPSTANDBY: "deepstandby",
# AFTEREVENT.AUTO: "auto"}))
#if timer.eit is not None:
# t.set("eit", str(timer.eit))
#if timer.dirname is not None:
# t.set("location", str(timer.dirname))
#t.set("disabled", str(int(timer.disabled)))
#t.set("justplay", str(int(timer.justplay)))
#t.text = "\n"
#t.tail = "\n"
#for time, code, msg in timer.log_entries:
#l = xml.etree.cElementTree.SubElement(t, 'log')
#l.set("time", str(time))
#l.set("code", str(code))
#l.text = str(msg)
#l.tail = "\n"
#doc = xml.etree.cElementTree.ElementTree(root_element)
#doc.write(self.Filename)
list = []
list.append('<?xml version="1.0" ?>\n')
list.append('<timers>\n')
for timer in self.timer_list + self.processed_timers:
if timer.dontSave:
continue
list.append('<timer')
list.append(' begin="' + str(int(timer.begin)) + '"')
list.append(' end="' + str(int(timer.end)) + '"')
list.append(' serviceref="' + stringToXML(str(timer.service_ref)) + '"')
list.append(' repeated="' + str(int(timer.repeated)) + '"')
list.append(' name="' + str(stringToXML(timer.name)) + '"')
list.append(' description="' + str(stringToXML(timer.description)) + '"')
list.append(' afterevent="' + str(stringToXML({
AFTEREVENT.NONE: "nothing",
AFTEREVENT.STANDBY: "standby",
AFTEREVENT.DEEPSTANDBY: "deepstandby",
AFTEREVENT.AUTO: "auto"
}[timer.afterEvent])) + '"')
if timer.eit is not None:
list.append(' eit="' + str(timer.eit) + '"')
if timer.dirname:
list.append(' location="' + str(stringToXML(timer.dirname)) + '"')
if timer.tags:
list.append(' tags="' + str(stringToXML(' '.join(timer.tags))) + '"')
if timer.disabled:
list.append(' disabled="' + str(int(timer.disabled)) + '"')
list.append(' justplay="' + str(int(timer.justplay)) + '"')
list.append(' always_zap="' + str(int(timer.always_zap)) + '"')
list.append(' zap_wakeup="' + str(timer.zap_wakeup) + '"')
list.append(' rename_repeat="' + str(int(timer.rename_repeat)) + '"')
list.append(' conflict_detection="' + str(int(timer.conflict_detection)) + '"')
list.append(' descramble="' + str(int(timer.descramble)) + '"')
list.append(' record_ecm="' + str(int(timer.record_ecm)) + '"')
if timer.flags:
list.append(' flags="' + ' '.join([stringToXML(x) for x in timer.flags]) + '"')
list.append('>\n')
if config.recording.debug.value:
for time, code, msg in timer.log_entries:
list.append('<log')
list.append(' code="' + str(code) + '"')
list.append(' time="' + str(time) + '"')
list.append('>')
list.append(str(stringToXML(msg)))
list.append('</log>\n')
list.append('</timer>\n')
list.append('</timers>\n')
file = open(self.Filename + ".writing", "w")
for x in list:
file.write(x)
file.flush()
import os
os.fsync(file.fileno())
file.close()
os.rename(self.Filename + ".writing", self.Filename)
def getNextZapTime(self, isWakeup=False):
now = time()
for timer in self.timer_list:
if not timer.justplay or timer.begin < now or isWakeup and timer.zap_wakeup in ("from_standby", "never"):
continue
return timer.begin
return -1
def getNextRecordingTime(self):
now = time()
for timer in self.timer_list:
next_act = timer.getNextActivation()
if timer.justplay or next_act < now:
continue
return next_act
return -1
def getNextTimerTime(self, isWakeup=False):
now = time()
for timer in self.timer_list:
next_act = timer.getNextActivation()
if next_act < now or isWakeup and timer.justplay and timer.zap_wakeup in ("from_standby", "never"):
continue
return next_act
return -1
def isNextRecordAfterEventActionAuto(self):
now = time()
t = None
for timer in self.timer_list:
if timer.justplay or timer.begin < now:
continue
if t is None or t.begin == timer.begin:
t = timer
if t.afterEvent == AFTEREVENT.AUTO:
return True
return False
def record(self, entry, ignoreTSC=False, dosave=True, loadtimer=False):
check_timer_list = self.timer_list[:]
timersanitycheck = TimerSanityCheck(check_timer_list,entry)
answer = None
if not timersanitycheck.check():
if not ignoreTSC:
print "[RecordTimer] timer conflict detected!"
print timersanitycheck.getSimulTimerList()
return timersanitycheck.getSimulTimerList()
else:
print "[RecordTimer] ignore timer conflict..."
if not dosave and loadtimer:
simulTimerList = timersanitycheck.getSimulTimerList()
if entry in simulTimerList:
entry.disabled = True
if entry in check_timer_list:
check_timer_list.remove(entry)
answer = simulTimerList
elif timersanitycheck.doubleCheck():
print "[RecordTimer] ignore double timer..."
return None
entry.timeChanged()
print "[Timer] Record " + str(entry)
entry.Timer = self
self.addTimerEntry(entry)
if dosave:
self.saveTimer()
return answer
def isInRepeatTimer(self, timer, event):
time_match = 0
is_editable = False
begin = event.getBeginTime()
duration = event.getDuration()
end = begin + duration
timer_end = timer.end
if timer.disabled and timer.isRunning():
if begin < timer.begin <= end or timer.begin <= begin <= timer_end:
return True
else:
return False
if timer.justplay and (timer_end - timer.begin) <= 1:
timer_end += 60
bt = localtime(begin)
bday = bt.tm_wday
begin2 = 1440 + bt.tm_hour * 60 + bt.tm_min
end2 = begin2 + duration / 60
xbt = localtime(timer.begin)
xet = localtime(timer_end)
offset_day = False
checking_time = timer.begin < begin or begin <= timer.begin <= end
if xbt.tm_yday != xet.tm_yday:
oday = bday - 1
if oday == -1: oday = 6
offset_day = timer.repeated & (1 << oday)
xbegin = 1440 + xbt.tm_hour * 60 + xbt.tm_min
xend = xbegin + ((timer_end - timer.begin) / 60)
if xend < xbegin:
xend += 1440
if timer.repeated & (1 << bday) and checking_time:
if begin2 < xbegin <= end2:
if xend < end2:
# recording within event
time_match = (xend - xbegin) * 60
is_editable = True
else:
# recording last part of event
time_match = (end2 - xbegin) * 60
summary_end = (xend - end2) * 60
is_editable = not summary_end and True or time_match >= summary_end
elif xbegin <= begin2 <= xend:
if xend < end2:
# recording first part of event
time_match = (xend - begin2) * 60
summary_end = (begin2 - xbegin) * 60
is_editable = not summary_end and True or time_match >= summary_end
else:
# recording whole event
time_match = (end2 - begin2) * 60
is_editable = True
elif offset_day:
xbegin -= 1440
xend -= 1440
if begin2 < xbegin <= end2:
if xend < end2:
# recording within event
time_match = (xend - xbegin) * 60
is_editable = True
else:
# recording last part of event
time_match = (end2 - xbegin) * 60
summary_end = (xend - end2) * 60
is_editable = not summary_end and True or time_match >= summary_end
elif xbegin <= begin2 <= xend:
if xend < end2:
# recording first part of event
time_match = (xend - begin2) * 60
summary_end = (begin2 - xbegin) * 60
is_editable = not summary_end and True or time_match >= summary_end
else:
# recording whole event
time_match = (end2 - begin2) * 60
is_editable = True
elif offset_day and checking_time:
xbegin -= 1440
xend -= 1440
if begin2 < xbegin <= end2:
if xend < end2:
# recording within event
time_match = (xend - xbegin) * 60
is_editable = True
else:
# recording last part of event
time_match = (end2 - xbegin) * 60
summary_end = (xend - end2) * 60
is_editable = not summary_end and True or time_match >= summary_end
elif xbegin <= begin2 <= xend:
if xend < end2:
# recording first part of event
time_match = (xend - begin2) * 60
summary_end = (begin2 - xbegin) * 60
is_editable = not summary_end and True or time_match >= summary_end
else:
# recording whole event
time_match = (end2 - begin2) * 60
is_editable = True
return time_match and is_editable
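# Worked example (illustrative numbers, not from the original source): for an
# EPG event starting at 20:00 with a 60-minute duration, begin2 = 1440 + 20*60
# = 2640 and end2 = 2700. A repeat timer running 19:55-21:05 on that weekday
# gives xbegin = 2635 and xend = 2705, so the "recording whole event" branch
# matches: time_match = (2700 - 2640) * 60 = 3600 seconds and is_editable = True.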
def isInTimer(self, eventid, begin, duration, service):
returnValue = None
type = 0
time_match = 0
bt = None
check_offset_time = not config.recording.margin_before.value and not config.recording.margin_after.value
end = begin + duration
refstr = ':'.join(service.split(':')[:11])
for x in self.timer_list:
check = ':'.join(x.service_ref.ref.toString().split(':')[:11]) == refstr
if not check:
sref = x.service_ref.ref
parent_sid = sref.getUnsignedData(5)
parent_tsid = sref.getUnsignedData(6)
if parent_sid and parent_tsid:
# check for subservice
sid = sref.getUnsignedData(1)
tsid = sref.getUnsignedData(2)
sref.setUnsignedData(1, parent_sid)
sref.setUnsignedData(2, parent_tsid)
sref.setUnsignedData(5, 0)
sref.setUnsignedData(6, 0)
check = sref.toCompareString() == refstr
num = 0
if check:
check = False
event = eEPGCache.getInstance().lookupEventId(sref, eventid)
num = event and event.getNumOfLinkageServices() or 0
sref.setUnsignedData(1, sid)
sref.setUnsignedData(2, tsid)
sref.setUnsignedData(5, parent_sid)
sref.setUnsignedData(6, parent_tsid)
for cnt in range(num):
subservice = event.getLinkageService(sref, cnt)
if sref.toCompareString() == subservice.toCompareString():
check = True
break
if check:
timer_end = x.end
timer_begin = x.begin
type_offset = 0
if not x.repeated and check_offset_time:
if 0 < end - timer_end <= 59:
timer_end = end
elif 0 < timer_begin - begin <= 59:
timer_begin = begin
if x.justplay:
type_offset = 5
if (timer_end - x.begin) <= 1:
timer_end += 60
if x.always_zap:
type_offset = 10
timer_repeat = x.repeated
# if a repeat timer is set to 'don't stop the current event but disable coming events'
running_only_curevent = x.disabled and x.isRunning() and timer_repeat
if running_only_curevent:
timer_repeat = 0
type_offset += 15
if timer_repeat != 0:
type_offset += 15
if bt is None:
bt = localtime(begin)
bday = bt.tm_wday
begin2 = 1440 + bt.tm_hour * 60 + bt.tm_min
end2 = begin2 + duration / 60
xbt = localtime(x.begin)
xet = localtime(timer_end)
offset_day = False
checking_time = x.begin < begin or begin <= x.begin <= end
if xbt.tm_yday != xet.tm_yday:
oday = bday - 1
if oday == -1: oday = 6
offset_day = x.repeated & (1 << oday)
xbegin = 1440 + xbt.tm_hour * 60 + xbt.tm_min
xend = xbegin + ((timer_end - x.begin) / 60)
if xend < xbegin:
xend += 1440
if x.repeated & (1 << bday) and checking_time:
if begin2 < xbegin <= end2:
if xend < end2:
# recording within event
time_match = (xend - xbegin) * 60
type = type_offset + 3
else:
# recording last part of event
time_match = (end2 - xbegin) * 60
type = type_offset + 1
elif xbegin <= begin2 <= xend:
if xend < end2:
# recording first part of event
time_match = (xend - begin2) * 60
type = type_offset + 4
else:
# recording whole event
time_match = (end2 - begin2) * 60
type = type_offset + 2
elif offset_day:
xbegin -= 1440
xend -= 1440
if begin2 < xbegin <= end2:
if xend < end2:
# recording within event
time_match = (xend - xbegin) * 60
type = type_offset + 3
else:
# recording last part of event
time_match = (end2 - xbegin) * 60
type = type_offset + 1
elif xbegin <= begin2 <= xend:
if xend < end2:
# recording first part of event
time_match = (xend - begin2) * 60
type = type_offset + 4
else:
# recording whole event
time_match = (end2 - begin2) * 60
type = type_offset + 2
elif offset_day and checking_time:
xbegin -= 1440
xend -= 1440
if begin2 < xbegin <= end2:
if xend < end2:
# recording within event
time_match = (xend - xbegin) * 60
type = type_offset + 3
else:
# recording last part of event
time_match = (end2 - xbegin) * 60
type = type_offset + 1
elif xbegin <= begin2 <= xend:
if xend < end2:
# recording first part of event
time_match = (xend - begin2) * 60
type = type_offset + 4
else:
# recording whole event
time_match = (end2 - begin2) * 60
type = type_offset + 2
else:
if begin < timer_begin <= end:
if timer_end < end:
# recording within event
time_match = timer_end - timer_begin
type = type_offset + 3
else:
# recording last part of event
time_match = end - timer_begin
type = type_offset + 1
elif timer_begin <= begin <= timer_end:
if timer_end < end:
# recording first part of event
time_match = timer_end - begin
type = type_offset + 4
else:
# recording whole event
time_match = end - begin
type = type_offset + 2
if time_match:
if type in (2,7,12,17,22,27):
# When full recording do not look further
returnValue = (time_match, [type])
break
elif returnValue:
if type not in returnValue[1]:
returnValue[1].append(type)
else:
returnValue = (time_match, [type])
return returnValue
def removeEntry(self, entry):
print "[Timer] Remove " + str(entry)
# avoid re-enqueuing
entry.repeated = False
# abort timer.
# this sets the end time to current time, so timer will be stopped.
entry.autoincrease = False
entry.abort()
if entry.state != entry.StateEnded:
self.timeChanged(entry)
print "state: ", entry.state
print "in processed: ", entry in self.processed_timers
print "in running: ", entry in self.timer_list
# autoincrease instanttimer if possible
if not entry.dontSave:
for x in self.timer_list:
if x.setAutoincreaseEnd():
self.timeChanged(x)
if entry in self.processed_timers:
# now the timer should be in the processed_timers list. remove it from there.
self.processed_timers.remove(entry)
self.saveTimer()
def shutdown(self):
self.saveTimer()
|
lesserwhirls/scipy-cwt
|
refs/heads/cwt
|
scipy/sparse/linalg/isolve/utils.py
|
10
|
__docformat__ = "restructuredtext en"
__all__ = []
from warnings import warn
from numpy import asanyarray, asarray, asmatrix, array, matrix, zeros
from scipy.sparse.linalg.interface import aslinearoperator, LinearOperator, \
IdentityOperator
_coerce_rules = {('f','f'):'f', ('f','d'):'d', ('f','F'):'F',
('f','D'):'D', ('d','f'):'d', ('d','d'):'d',
('d','F'):'D', ('d','D'):'D', ('F','f'):'F',
('F','d'):'D', ('F','F'):'F', ('F','D'):'D',
('D','f'):'D', ('D','d'):'D', ('D','F'):'D',
('D','D'):'D'}
def coerce(x,y):
if x not in 'fdFD':
x = 'd'
if y not in 'fdFD':
y = 'd'
return _coerce_rules[x,y]
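# Illustrative examples of the coercion rules above (not part of the original
# module): coerce('f', 'd') -> 'd', coerce('F', 'd') -> 'D', and unknown type
# characters fall back to double precision, e.g. coerce('i', 'f') -> 'd'.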
def id(x):
return x
def make_system(A, M, x0, b, xtype=None):
"""Make a linear system Ax=b
Parameters
----------
A : LinearOperator
sparse or dense matrix (or any valid input to aslinearoperator)
M : {LinearOperator, None}
preconditioner
sparse or dense matrix (or any valid input to aslinearoperator)
x0 : {array_like, None}
initial guess to iterative method
b : array_like
right hand side
xtype : {'f', 'd', 'F', 'D', None}
dtype of the x vector
Returns
-------
(A, M, x, b, postprocess)
A : LinearOperator
matrix of the linear system
M : LinearOperator
preconditioner
x : rank 1 ndarray
initial guess
b : rank 1 ndarray
right hand side
postprocess : function
converts the solution vector to the appropriate
type and dimensions (e.g. (N,1) matrix)
"""
A_ = A
A = aslinearoperator(A)
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix, but got shape=%s' % (A.shape,))
N = A.shape[0]
b = asanyarray(b)
if not (b.shape == (N,1) or b.shape == (N,)):
raise ValueError('A and b have incompatible dimensions')
if b.dtype.char not in 'fdFD':
b = b.astype('d') # upcast non-FP types to double
def postprocess(x):
if isinstance(b,matrix):
x = asmatrix(x)
return x.reshape(b.shape)
if xtype is None:
if hasattr(A,'dtype'):
xtype = A.dtype.char
else:
xtype = A.matvec(b).dtype.char
xtype = coerce(xtype, b.dtype.char)
else:
warn('Use of xtype argument is deprecated. '\
'Use LinearOperator( ... , dtype=xtype) instead.',\
DeprecationWarning)
if xtype == 0:
xtype = b.dtype.char
else:
if xtype not in 'fdFD':
raise ValueError("xtype must be 'f', 'd', 'F', or 'D'")
b = asarray(b,dtype=xtype) #make b the same type as x
b = b.ravel()
if x0 is None:
x = zeros(N, dtype=xtype)
else:
x = array(x0, dtype=xtype)
if not (x.shape == (N,1) or x.shape == (N,)):
raise ValueError('A and x have incompatible dimensions')
x = x.ravel()
# process preconditioner
if M is None:
if hasattr(A_,'psolve'):
psolve = A_.psolve
else:
psolve = id
if hasattr(A_,'rpsolve'):
rpsolve = A_.rpsolve
else:
rpsolve = id
if psolve is id and rpsolve is id:
M = IdentityOperator(shape=A.shape, dtype=A.dtype)
else:
M = LinearOperator(A.shape, matvec=psolve, rmatvec=rpsolve,
dtype=A.dtype)
else:
M = aslinearoperator(M)
if A.shape != M.shape:
raise ValueError('matrix and preconditioner have different shapes')
return A, M, x, b, postprocess
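# Illustrative usage sketch (not part of the original module): wrap a small
# dense system with make_system(). The names A_demo/b_demo are made up for the
# example; M=None selects an identity preconditioner and x0=None a zero
# initial guess.
if __name__ == "__main__":
import numpy as np
A_demo = np.array([[4.0, 1.0], [1.0, 3.0]])
b_demo = np.array([1.0, 2.0])
A_op, M_op, x_vec, b_vec, post = make_system(A_demo, None, None, b_demo)
print x_vec, b_vec.dtype # -> [ 0.  0.] float64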
|
tusharmakkar08/Diamond
|
refs/heads/master
|
src/collectors/ntpd/test/testntpd.py
|
31
|
#!/usr/bin/python
# coding=utf-8
##########################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from ntpd import NtpdCollector
##########################################################################
class TestNtpdCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('NtpdCollector', {})
self.collector = NtpdCollector(config, None)
def test_import(self):
self.assertTrue(NtpdCollector)
@patch.object(Collector, 'publish')
def test_should_work_with_real_data(self, publish_mock):
ntpq_data = Mock(
return_value=self.getFixture('ntpq').getvalue())
ntpdc_kerninfo_data = Mock(
return_value=self.getFixture('ntpdc_kerninfo').getvalue())
ntpdc_sysinfo_data = Mock(
return_value=self.getFixture('ntpdc_sysinfo').getvalue())
collector_mock = patch.multiple(
NtpdCollector,
get_ntpq_output=ntpq_data,
get_ntpdc_kerninfo_output=ntpdc_kerninfo_data,
get_ntpdc_sysinfo_output=ntpdc_sysinfo_data)
collector_mock.start()
self.collector.collect()
collector_mock.stop()
metrics = {
'jitter': 0.026,
'when': 39,
'stratum': 2,
'reach': 377,
'delay': 0.127,
'poll': 1024,
'max_error': 0.039793,
'est_error': 5.1e-05,
'frequency': -14.24,
'offset': -5.427e-06,
'root_distance': 0.07663,
'root_dispersion': 0.09311
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
@patch.object(Collector, 'publish')
def test_should_fail_gracefully(self, publish_mock):
ntpq_data = Mock(return_value='')
ntpdc_kerninfo_data = Mock(return_value='')
ntpdc_sysinfo_data = Mock(return_value='')
collector_mock = patch.multiple(
NtpdCollector,
get_ntpq_output=ntpq_data,
get_ntpdc_kerninfo_output=ntpdc_kerninfo_data,
get_ntpdc_sysinfo_output=ntpdc_sysinfo_data)
collector_mock.start()
self.collector.collect()
collector_mock.stop()
self.assertPublishedMany(publish_mock, {})
##########################################################################
if __name__ == "__main__":
unittest.main()
|
Changaco/oh-mainline
|
refs/heads/master
|
vendor/packages/sphinx/sphinx/ext/inheritance_diagram.py
|
15
|
# -*- coding: utf-8 -*-
r"""
sphinx.ext.inheritance_diagram
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Defines a docutils directive for inserting inheritance diagrams.
Provide the directive with one or more classes or modules (separated
by whitespace). For modules, all of the classes in that module will
be used.
Example::
Given the following classes:
class A: pass
class B(A): pass
class C(A): pass
class D(B, C): pass
class E(B): pass
.. inheritance-diagram:: D E
Produces a graph like the following:
A
/ \
B C
/ \ /
E D
The graph is inserted as a PNG+image map into HTML and a PDF in
LaTeX.
:copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import sys
import inspect
import __builtin__ as __builtin__ # as __builtin__ is for lib2to3 compatibility
try:
from hashlib import md5
except ImportError:
from md5 import md5
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx.ext.graphviz import render_dot_html, render_dot_latex, \
render_dot_texinfo
from sphinx.util.compat import Directive
class_sig_re = re.compile(r'''^([\w.]*\.)? # module names
(\w+) \s* $ # class/final module name
''', re.VERBOSE)
class InheritanceException(Exception):
pass
class InheritanceGraph(object):
"""
Given a list of classes, determines the set of classes that they inherit
from all the way to the root "object", and then is able to generate a
graphviz dot graph from them.
"""
def __init__(self, class_names, currmodule, show_builtins=False,
private_bases=False, parts=0):
"""*class_names* is a list of child classes to show bases from.
If *show_builtins* is True, then Python builtins will be shown
in the graph.
"""
self.class_names = class_names
classes = self._import_classes(class_names, currmodule)
self.class_info = self._class_info(classes, show_builtins,
private_bases, parts)
if not self.class_info:
raise InheritanceException('No classes found for '
'inheritance diagram')
def _import_class_or_module(self, name, currmodule):
"""Import a class using its fully-qualified *name*."""
try:
path, base = class_sig_re.match(name).groups()
except (AttributeError, ValueError):
raise InheritanceException('Invalid class or module %r specified '
'for inheritance diagram' % name)
fullname = (path or '') + base
path = (path and path.rstrip('.') or '')
# two possibilities: either it is a module, then import it
try:
__import__(fullname)
todoc = sys.modules[fullname]
except ImportError:
# else it is a class, then import the module
if not path:
if currmodule:
# try the current module
path = currmodule
else:
raise InheritanceException(
'Could not import class %r specified for '
'inheritance diagram' % base)
try:
__import__(path)
todoc = getattr(sys.modules[path], base)
except (ImportError, AttributeError):
raise InheritanceException(
'Could not import class or module %r specified for '
'inheritance diagram' % (path + '.' + base))
# If a class, just return it
if inspect.isclass(todoc):
return [todoc]
elif inspect.ismodule(todoc):
classes = []
for cls in todoc.__dict__.values():
if inspect.isclass(cls) and cls.__module__ == todoc.__name__:
classes.append(cls)
return classes
raise InheritanceException('%r specified for inheritance diagram is '
'not a class or module' % name)
def _import_classes(self, class_names, currmodule):
"""Import a list of classes."""
classes = []
for name in class_names:
classes.extend(self._import_class_or_module(name, currmodule))
return classes
def _class_info(self, classes, show_builtins, private_bases, parts):
"""Return name and bases for all classes that are ancestors of
*classes*.
*parts* gives the number of dotted name parts that is removed from the
displayed node names.
"""
all_classes = {}
builtins = vars(__builtin__).values()
def recurse(cls):
if not show_builtins and cls in builtins:
return
if not private_bases and cls.__name__.startswith('_'):
return
nodename = self.class_name(cls, parts)
fullname = self.class_name(cls, 0)
baselist = []
all_classes[cls] = (nodename, fullname, baselist)
for base in cls.__bases__:
if not show_builtins and base in builtins:
continue
if not private_bases and base.__name__.startswith('_'):
continue
baselist.append(self.class_name(base, parts))
if base not in all_classes:
recurse(base)
for cls in classes:
recurse(cls)
return all_classes
def class_name(self, cls, parts=0):
"""Given a class object, return a fully-qualified name.
This works for things I've tested in matplotlib so far, but may not be
completely general.
"""
module = cls.__module__
if module == '__builtin__':
fullname = cls.__name__
else:
fullname = '%s.%s' % (module, cls.__name__)
if parts == 0:
return fullname
name_parts = fullname.split('.')
return '.'.join(name_parts[-parts:])
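# For example (illustrative): if a class's fully-qualified name is
# 'sphinx.ext.graphviz.GraphvizError', parts=0 keeps the full dotted path,
# parts=1 yields 'GraphvizError' and parts=2 yields 'graphviz.GraphvizError'.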
def get_all_class_names(self):
"""Get all of the class names involved in the graph."""
return [fullname for (_, fullname, _) in self.class_info.values()]
# These are the default attrs for graphviz
default_graph_attrs = {
'rankdir': 'LR',
'size': '"8.0, 12.0"',
}
default_node_attrs = {
'shape': 'box',
'fontsize': 10,
'height': 0.25,
'fontname': '"Vera Sans, DejaVu Sans, Liberation Sans, '
'Arial, Helvetica, sans"',
'style': '"setlinewidth(0.5)"',
}
default_edge_attrs = {
'arrowsize': 0.5,
'style': '"setlinewidth(0.5)"',
}
def _format_node_attrs(self, attrs):
return ','.join(['%s=%s' % x for x in attrs.items()])
def _format_graph_attrs(self, attrs):
return ''.join(['%s=%s;\n' % x for x in attrs.items()])
def generate_dot(self, name, urls={}, env=None,
graph_attrs={}, node_attrs={}, edge_attrs={}):
"""Generate a graphviz dot graph from the classes that were passed in
to __init__.
*name* is the name of the graph.
*urls* is a dictionary mapping class names to HTTP URLs.
*graph_attrs*, *node_attrs*, *edge_attrs* are dictionaries containing
key/value pairs to pass on as graphviz properties.
"""
g_attrs = self.default_graph_attrs.copy()
n_attrs = self.default_node_attrs.copy()
e_attrs = self.default_edge_attrs.copy()
g_attrs.update(graph_attrs)
n_attrs.update(node_attrs)
e_attrs.update(edge_attrs)
if env:
g_attrs.update(env.config.inheritance_graph_attrs)
n_attrs.update(env.config.inheritance_node_attrs)
e_attrs.update(env.config.inheritance_edge_attrs)
res = []
res.append('digraph %s {\n' % name)
res.append(self._format_graph_attrs(g_attrs))
for cls, (name, fullname, bases) in self.class_info.items():
# Write the node
this_node_attrs = n_attrs.copy()
if fullname in urls:
this_node_attrs['URL'] = '"%s"' % urls[fullname]
# Use first line of docstring as tooltip, if available
if cls.__doc__:
doc = cls.__doc__.strip().split("\n")[0]
if doc:
doc = doc.replace('"', '\\"')
this_node_attrs['tooltip'] = '"%s"' % doc
res.append(' "%s" [%s];\n' %
(name, self._format_node_attrs(this_node_attrs)))
# Write the edges
for base_name in bases:
res.append(' "%s" -> "%s" [%s];\n' %
(base_name, name,
self._format_node_attrs(e_attrs)))
res.append('}\n')
return ''.join(res)
class inheritance_diagram(nodes.General, nodes.Element):
"""
A docutils node to use as a placeholder for the inheritance diagram.
"""
pass
class InheritanceDiagram(Directive):
"""
Run when the inheritance_diagram directive is first encountered.
"""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {
'parts': directives.nonnegative_int,
'private-bases': directives.flag,
}
def run(self):
node = inheritance_diagram()
node.document = self.state.document
env = self.state.document.settings.env
class_names = self.arguments[0].split()
class_role = env.get_domain('py').role('class')
# Store the original content for use as a hash
node['parts'] = self.options.get('parts', 0)
node['content'] = ', '.join(class_names)
# Create a graph starting with the list of classes
try:
graph = InheritanceGraph(
class_names, env.temp_data.get('py:module'),
parts=node['parts'],
private_bases='private-bases' in self.options)
except InheritanceException, err:
return [node.document.reporter.warning(err.args[0],
line=self.lineno)]
# Create xref nodes for each target of the graph's image map and
# add them to the doc tree so that Sphinx can resolve the
# references to real URLs later. These nodes will eventually be
# removed from the doctree after we're done with them.
for name in graph.get_all_class_names():
refnodes, x = class_role(
'class', ':class:`%s`' % name, name, 0, self.state)
node.extend(refnodes)
# Store the graph object so we can use it to generate the
# dot file later
node['graph'] = graph
return [node]
def get_graph_hash(node):
encoded = (node['content'] + str(node['parts'])).encode('utf-8')
return md5(encoded).hexdigest()[-10:]
def html_visit_inheritance_diagram(self, node):
"""
Output the graph for HTML. This will insert a PNG with clickable
image map.
"""
graph = node['graph']
graph_hash = get_graph_hash(node)
name = 'inheritance%s' % graph_hash
# Create a mapping from fully-qualified class names to URLs.
urls = {}
for child in node:
if child.get('refuri') is not None:
urls[child['reftitle']] = child.get('refuri')
elif child.get('refid') is not None:
urls[child['reftitle']] = '#' + child.get('refid')
dotcode = graph.generate_dot(name, urls, env=self.builder.env)
render_dot_html(self, node, dotcode, [], 'inheritance', 'inheritance',
alt='Inheritance diagram of ' + node['content'])
raise nodes.SkipNode
def latex_visit_inheritance_diagram(self, node):
"""
Output the graph for LaTeX. This will insert a PDF.
"""
graph = node['graph']
graph_hash = get_graph_hash(node)
name = 'inheritance%s' % graph_hash
dotcode = graph.generate_dot(name, env=self.builder.env,
graph_attrs={'size': '"6.0,6.0"'})
render_dot_latex(self, node, dotcode, [], 'inheritance')
raise nodes.SkipNode
def texinfo_visit_inheritance_diagram(self, node):
"""
Output the graph for Texinfo. This will insert a PNG.
"""
graph = node['graph']
graph_hash = get_graph_hash(node)
name = 'inheritance%s' % graph_hash
dotcode = graph.generate_dot(name, env=self.builder.env,
graph_attrs={'size': '"6.0,6.0"'})
render_dot_texinfo(self, node, dotcode, [], 'inheritance')
raise nodes.SkipNode
def skip(self, node):
raise nodes.SkipNode
def setup(app):
app.setup_extension('sphinx.ext.graphviz')
app.add_node(
inheritance_diagram,
latex=(latex_visit_inheritance_diagram, None),
html=(html_visit_inheritance_diagram, None),
text=(skip, None),
man=(skip, None),
texinfo=(texinfo_visit_inheritance_diagram, None))
app.add_directive('inheritance-diagram', InheritanceDiagram)
app.add_config_value('inheritance_graph_attrs', {}, False)
app.add_config_value('inheritance_node_attrs', {}, False)
app.add_config_value('inheritance_edge_attrs', {}, False)
|
demarle/VTK
|
refs/heads/master
|
Filters/Core/Testing/Python/financialField.py
|
26
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
=========================================================================
Program: Visualization Toolkit
Module: TestNamedColorsIntegration.py
Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================
'''
import vtk
import vtk.test.Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
class financialField(vtk.test.Testing.vtkTest):
def testFinancialField(self):
size = 3187 #maximum number possible
#set size 100 #maximum number possible
xAxis = "INTEREST_RATE"
yAxis = "MONTHLY_PAYMENT"
zAxis = "MONTHLY_INCOME"
scalar = "TIME_LATE"
# extract data from field as a polydata (just points), then extract scalars
fdr = vtk.vtkDataObjectReader()
fdr.SetFileName(VTK_DATA_ROOT + "/Data/financial.vtk")
do2ds = vtk.vtkDataObjectToDataSetFilter()
do2ds.SetInputConnection(fdr.GetOutputPort())
do2ds.SetDataSetTypeToPolyData()
#format: component#, arrayname, arraycomp, minArrayId, maxArrayId, normalize
do2ds.DefaultNormalizeOn()
do2ds.SetPointComponent(0, xAxis, 0)
do2ds.SetPointComponent(1, yAxis, 0, 0, size, 1)
do2ds.SetPointComponent(2, zAxis, 0)
do2ds.Update()
if fdr.GetOutput().GetFieldData().GetAbstractArray("Some Text").GetValue(0) != "Test me":
raise RuntimeError, 'Could not properly read string array "Some Text"'
fd2ad = vtk.vtkFieldDataToAttributeDataFilter()
fd2ad.SetInputConnection(do2ds.GetOutputPort())
fd2ad.SetInputFieldToDataObjectField()
fd2ad.SetOutputAttributeDataToPointData()
fd2ad.DefaultNormalizeOn()
fd2ad.SetScalarComponent(0, scalar, 0)
# construct pipeline for original population
popSplatter = vtk.vtkGaussianSplatter()
popSplatter.SetInputConnection(fd2ad.GetOutputPort())
popSplatter.SetSampleDimensions(50, 50, 50)
popSplatter.SetRadius(0.05)
popSplatter.ScalarWarpingOff()
popSurface = vtk.vtkMarchingContourFilter()
popSurface.SetInputConnection(popSplatter.GetOutputPort())
popSurface.SetValue(0, 0.01)
popMapper = vtk.vtkPolyDataMapper()
popMapper.SetInputConnection(popSurface.GetOutputPort())
popMapper.ScalarVisibilityOff()
popActor = vtk.vtkActor()
popActor.SetMapper(popMapper)
popActor.GetProperty().SetOpacity(0.3)
popActor.GetProperty().SetColor(.9, .9, .9)
# construct pipeline for delinquent population
lateSplatter = vtk.vtkGaussianSplatter()
lateSplatter.SetInputConnection(fd2ad.GetOutputPort())
lateSplatter.SetSampleDimensions(50, 50, 50)
lateSplatter.SetRadius(0.05)
lateSplatter.SetScaleFactor(0.05)
lateSurface = vtk.vtkMarchingContourFilter()
lateSurface.SetInputConnection(lateSplatter.GetOutputPort())
lateSurface.SetValue(0, 0.01)
lateMapper = vtk.vtkPolyDataMapper()
lateMapper.SetInputConnection(lateSurface.GetOutputPort())
lateMapper.ScalarVisibilityOff()
lateActor = vtk.vtkActor()
lateActor.SetMapper(lateMapper)
lateActor.GetProperty().SetColor(1.0, 0.0, 0.0)
# create axes
popSplatter.Update()
bounds = popSplatter.GetOutput().GetBounds()
axes = vtk.vtkAxes()
axes.SetOrigin(bounds[0], bounds[2], bounds[4])
axes.SetScaleFactor(popSplatter.GetOutput().GetLength() / 5.0)
axesTubes = vtk.vtkTubeFilter()
axesTubes.SetInputConnection(axes.GetOutputPort())
axesTubes.SetRadius(axes.GetScaleFactor() / 25.0)
axesTubes.SetNumberOfSides(6)
axesMapper = vtk.vtkPolyDataMapper()
axesMapper.SetInputConnection(axesTubes.GetOutputPort())
axesActor = vtk.vtkActor()
axesActor.SetMapper(axesMapper)
# label the axes
XText = vtk.vtkVectorText()
XText.SetText(xAxis)
XTextMapper = vtk.vtkPolyDataMapper()
XTextMapper.SetInputConnection(XText.GetOutputPort())
XActor = vtk.vtkFollower()
XActor.SetMapper(XTextMapper)
XActor.SetScale(0.02, .02, .02)
XActor.SetPosition(0.35, -0.05, -0.05)
XActor.GetProperty().SetColor(0, 0, 0)
YText = vtk.vtkVectorText()
YText.SetText(yAxis)
YTextMapper = vtk.vtkPolyDataMapper()
YTextMapper.SetInputConnection(YText.GetOutputPort())
YActor = vtk.vtkFollower()
YActor.SetMapper(YTextMapper)
YActor.SetScale(0.02, .02, .02)
YActor.SetPosition(-0.05, 0.35, -0.05)
YActor.GetProperty().SetColor(0, 0, 0)
ZText = vtk.vtkVectorText()
ZText.SetText(zAxis)
ZTextMapper = vtk.vtkPolyDataMapper()
ZTextMapper.SetInputConnection(ZText.GetOutputPort())
ZActor = vtk.vtkFollower()
ZActor.SetMapper(ZTextMapper)
ZActor.SetScale(0.02, .02, .02)
ZActor.SetPosition(-0.05, -0.05, 0.35)
ZActor.GetProperty().SetColor(0, 0, 0)
# Graphics stuff
#
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
renWin.SetWindowName("vtk - Field.Data")
# Add the actors to the renderer, set the background and size
#
ren.AddActor(axesActor)
ren.AddActor(lateActor)
ren.AddActor(XActor)
ren.AddActor(YActor)
ren.AddActor(ZActor)
ren.AddActor(popActor) # added last because it's translucent
ren.SetBackground(1, 1, 1)
renWin.SetSize(400, 400)
camera = vtk.vtkCamera()
camera.SetClippingRange(.274, 13.72)
camera.SetFocalPoint(0.433816, 0.333131, 0.449)
camera.SetPosition(-1.96987, 1.15145, 1.49053)
camera.SetViewUp(0.378927, 0.911821, 0.158107)
ren.SetActiveCamera(camera)
XActor.SetCamera(camera)
YActor.SetCamera(camera)
ZActor.SetCamera(camera)
# render and interact with data
iRen = vtk.vtkRenderWindowInteractor()
iRen.SetRenderWindow(renWin)
renWin.Render()
img_file = "financialField.png"
vtk.test.Testing.compareImage(iRen.GetRenderWindow(), vtk.test.Testing.getAbsImagePath(img_file), threshold=25)
vtk.test.Testing.interact()
if __name__ == "__main__":
vtk.test.Testing.main([(financialField, 'test')])
|
orchidinfosys/odoo
|
refs/heads/master
|
openerp/addons/test_new_api/tests/test_no_infinite_recursion.py
|
177
|
# -*- coding: utf-8 -*-
from openerp.tests import common
class test_no_infinite_recursion(common.TransactionCase):
def setUp(self):
super(test_no_infinite_recursion, self).setUp()
self.tstfct = self.registry['test_old_api.function_noinfiniterecursion']
def test_00_create_and_update(self):
"""
Check that computing old api function field does not cycle infinitely
See https://github.com/odoo/odoo/pull/7558
"""
cr, uid, context, tstfct = self.cr, self.uid, {}, self.tstfct
vals = {
'f0': 'Test create',
}
idnew = tstfct.create(cr, uid, vals, context=context)
tst = tstfct.browse(cr, uid, idnew, context=context)
self.assertEqual(tst.f1, 'create')
vals = {
'f0': 'Test write',
}
tstfct.write(cr, uid, idnew, vals, context=context)
self.assertEqual(tst.f1, 'write')
|
Signbank/Auslan-signbank
|
refs/heads/master
|
signbank/attachments/migrations/__init__.py
|
12133432
| |
eharney/cinder
|
refs/heads/master
|
cinder/tests/unit/volume/drivers/dell_emc/vmax/__init__.py
|
12133432
| |
yvaucher/stock-logistics-transport
|
refs/heads/8.0
|
transport_information/model/transport_vehicle.py
|
12
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright 2014 Camptocamp SA
# Author: Leonardo Pistone
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields
class TransportVehicle(models.Model):
_name = "transport.vehicle"
name = fields.Char('Name', required=True, translate=True)
|
stephen144/odoo
|
refs/heads/9.0
|
addons/stock/wizard/stock_return_picking.py
|
22
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.osv import osv, fields
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
from openerp.exceptions import UserError
class stock_return_picking_line(osv.osv_memory):
_name = "stock.return.picking.line"
_rec_name = 'product_id'
_columns = {
'product_id': fields.many2one('product.product', string="Product", required=True),
'quantity': fields.float("Quantity", digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'wizard_id': fields.many2one('stock.return.picking', string="Wizard"),
'move_id': fields.many2one('stock.move', "Move"),
}
class stock_return_picking(osv.osv_memory):
_name = 'stock.return.picking'
_description = 'Return Picking'
_columns = {
'product_return_moves': fields.one2many('stock.return.picking.line', 'wizard_id', 'Moves'),
'move_dest_exists': fields.boolean('Chained Move Exists', readonly=True, help="Technical field used to hide help tooltip if not needed"),
'original_location_id': fields.many2one('stock.location'),
'parent_location_id': fields.many2one('stock.location'),
'location_id': fields.many2one('stock.location', 'Return Location',
domain="['|', ('id', '=', original_location_id), '&', ('return_location', '=', True), ('id', 'child_of', parent_location_id)]")
}
def default_get(self, cr, uid, fields, context=None):
"""
To get default values for the object.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param fields: List of fields for which we want default values
@param context: A standard dictionary
@return: A dictionary with default values for all field in ``fields``
"""
result1 = []
if context is None:
context = {}
if context and context.get('active_ids', False):
if len(context.get('active_ids')) > 1:
raise osv.except_osv(_('Warning!'), _("You may only return one picking at a time!"))
res = super(stock_return_picking, self).default_get(cr, uid, fields, context=context)
record_id = context and context.get('active_id', False) or False
uom_obj = self.pool.get('product.uom')
pick_obj = self.pool.get('stock.picking')
pick = pick_obj.browse(cr, uid, record_id, context=context)
quant_obj = self.pool.get("stock.quant")
chained_move_exist = False
if pick:
if pick.state != 'done':
raise UserError(_("You may only return pickings that are Done!"))
for move in pick.move_lines:
if move.move_dest_id:
chained_move_exist = True
#Sum the quants in that location that can be returned (they should have been moved by the moves that were included in the returned picking)
qty = 0
quant_search = quant_obj.search(cr, uid, [('history_ids', 'in', move.id), ('qty', '>', 0.0), ('location_id', 'child_of', move.location_dest_id.id)], context=context)
for quant in quant_obj.browse(cr, uid, quant_search, context=context):
if not quant.reservation_id or quant.reservation_id.origin_returned_move_id.id != move.id:
qty += quant.qty
qty = uom_obj._compute_qty(cr, uid, move.product_id.uom_id.id, qty, move.product_uom.id)
result1.append((0, 0, {'product_id': move.product_id.id, 'quantity': qty, 'move_id': move.id}))
if len(result1) == 0:
raise UserError(_("No products to return (only lines in Done state and not fully returned yet can be returned)!"))
if 'product_return_moves' in fields:
res.update({'product_return_moves': result1})
if 'move_dest_exists' in fields:
res.update({'move_dest_exists': chained_move_exist})
if 'parent_location_id' in fields and pick.location_id.usage == 'internal':
res.update({'parent_location_id':pick.picking_type_id.warehouse_id and pick.picking_type_id.warehouse_id.view_location_id.id or pick.location_id.location_id.id})
if 'original_location_id' in fields:
res.update({'original_location_id': pick.location_id.id})
if 'location_id' in fields:
res.update({'location_id': pick.location_id.id})
return res
def _create_returns(self, cr, uid, ids, context=None):
if context is None:
context = {}
record_id = context and context.get('active_id', False) or False
move_obj = self.pool.get('stock.move')
pick_obj = self.pool.get('stock.picking')
uom_obj = self.pool.get('product.uom')
data_obj = self.pool.get('stock.return.picking.line')
pick = pick_obj.browse(cr, uid, record_id, context=context)
data = self.read(cr, uid, ids[0], context=context)
returned_lines = 0
# Cancel assignment of existing chained assigned moves
moves_to_unreserve = []
for move in pick.move_lines:
to_check_moves = [move.move_dest_id] if move.move_dest_id.id else []
while to_check_moves:
current_move = to_check_moves.pop()
if current_move.state not in ('done', 'cancel') and current_move.reserved_quant_ids:
moves_to_unreserve.append(current_move.id)
split_move_ids = move_obj.search(cr, uid, [('split_from', '=', current_move.id)], context=context)
if split_move_ids:
to_check_moves += move_obj.browse(cr, uid, split_move_ids, context=context)
if moves_to_unreserve:
move_obj.do_unreserve(cr, uid, moves_to_unreserve, context=context)
#break the link between moves in order to be able to fix them later if needed
move_obj.write(cr, uid, moves_to_unreserve, {'move_orig_ids': False}, context=context)
#Create new picking for returned products
pick_type_id = pick.picking_type_id.return_picking_type_id and pick.picking_type_id.return_picking_type_id.id or pick.picking_type_id.id
new_picking = pick_obj.copy(cr, uid, pick.id, {
'move_lines': [],
'picking_type_id': pick_type_id,
'state': 'draft',
'origin': pick.name,
'location_id': pick.location_dest_id.id,
'location_dest_id': data['location_id'] and data['location_id'][0] or pick.location_id.id,
}, context=context)
for data_get in data_obj.browse(cr, uid, data['product_return_moves'], context=context):
move = data_get.move_id
if not move:
raise UserError(_("You have manually created product lines, please delete them to proceed"))
new_qty = data_get.quantity
if new_qty:
# The return of a return should be linked with the original's destination move if it was not cancelled
if move.origin_returned_move_id.move_dest_id.id and move.origin_returned_move_id.move_dest_id.state != 'cancel':
move_dest_id = move.origin_returned_move_id.move_dest_id.id
else:
move_dest_id = False
returned_lines += 1
location_id = data['location_id'] and data['location_id'][0] or move.location_id.id
move_obj.copy(cr, uid, move.id, {
'product_id': data_get.product_id.id,
'product_uom_qty': new_qty,
'picking_id': new_picking,
'state': 'draft',
'location_id': move.location_dest_id.id,
'location_dest_id': location_id,
'picking_type_id': pick_type_id,
'warehouse_id': pick.picking_type_id.warehouse_id.id,
'origin_returned_move_id': move.id,
'procure_method': 'make_to_stock',
'move_dest_id': move_dest_id,
})
if not returned_lines:
raise UserError(_("Please specify at least one non-zero quantity."))
pick_obj.action_confirm(cr, uid, [new_picking], context=context)
pick_obj.action_assign(cr, uid, [new_picking], context=context)
return new_picking, pick_type_id
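    # Note: _create_returns() hands back (new_picking_id, picking_type_id) so
    # that create_returns() below can open the new picking in an act_window.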
def create_returns(self, cr, uid, ids, context=None):
"""
        Creates the return picking and returns an act_window pointing to the new picking
"""
new_picking_id, pick_type_id = self._create_returns(cr, uid, ids, context=context)
# Override the context to disable all the potential filters that could have been set previously
ctx = context.copy()
ctx.update({
'search_default_picking_type_id': pick_type_id,
'search_default_draft': False,
'search_default_assigned': False,
'search_default_confirmed': False,
'search_default_ready': False,
'search_default_late': False,
'search_default_available': False,
})
return {
'name': _('Returned Picking'),
'view_type': 'form',
'view_mode': 'form,tree,calendar',
'res_model': 'stock.picking',
'res_id': new_picking_id,
'type': 'ir.actions.act_window',
'context': ctx,
}
|
ekalosak/numpy
|
refs/heads/master
|
numpy/polynomial/tests/test_legendre.py
|
123
|
"""Tests for legendre module.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import numpy.polynomial.legendre as leg
from numpy.polynomial.polynomial import polyval
from numpy.testing import (
TestCase, assert_almost_equal, assert_raises,
assert_equal, assert_, run_module_suite)
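# Power-series coefficients of the Legendre polynomials P_0..P_9, lowest degree
# first; these serve as reference values throughout the tests below.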
L0 = np.array([1])
L1 = np.array([0, 1])
L2 = np.array([-1, 0, 3])/2
L3 = np.array([0, -3, 0, 5])/2
L4 = np.array([3, 0, -30, 0, 35])/8
L5 = np.array([0, 15, 0, -70, 0, 63])/8
L6 = np.array([-5, 0, 105, 0, -315, 0, 231])/16
L7 = np.array([0, -35, 0, 315, 0, -693, 0, 429])/16
L8 = np.array([35, 0, -1260, 0, 6930, 0, -12012, 0, 6435])/128
L9 = np.array([0, 315, 0, -4620, 0, 18018, 0, -25740, 0, 12155])/128
Llist = [L0, L1, L2, L3, L4, L5, L6, L7, L8, L9]
def trim(x):
return leg.legtrim(x, tol=1e-6)
class TestConstants(TestCase):
def test_legdomain(self):
assert_equal(leg.legdomain, [-1, 1])
def test_legzero(self):
assert_equal(leg.legzero, [0])
def test_legone(self):
assert_equal(leg.legone, [1])
def test_legx(self):
assert_equal(leg.legx, [0, 1])
class TestArithmetic(TestCase):
x = np.linspace(-1, 1, 100)
def test_legadd(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] += 1
res = leg.legadd([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_legsub(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] -= 1
res = leg.legsub([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_legmulx(self):
assert_equal(leg.legmulx([0]), [0])
assert_equal(leg.legmulx([1]), [0, 1])
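        # The loop checks the three-term recurrence
        # x*P_i = (i*P_{i-1} + (i+1)*P_{i+1}) / (2*i + 1).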
for i in range(1, 5):
tmp = 2*i + 1
ser = [0]*i + [1]
tgt = [0]*(i - 1) + [i/tmp, 0, (i + 1)/tmp]
assert_equal(leg.legmulx(ser), tgt)
def test_legmul(self):
# check values of result
for i in range(5):
pol1 = [0]*i + [1]
val1 = leg.legval(self.x, pol1)
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
pol2 = [0]*j + [1]
val2 = leg.legval(self.x, pol2)
pol3 = leg.legmul(pol1, pol2)
val3 = leg.legval(self.x, pol3)
assert_(len(pol3) == i + j + 1, msg)
assert_almost_equal(val3, val1*val2, err_msg=msg)
def test_legdiv(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
ci = [0]*i + [1]
cj = [0]*j + [1]
tgt = leg.legadd(ci, cj)
quo, rem = leg.legdiv(tgt, ci)
res = leg.legadd(leg.legmul(quo, ci), rem)
assert_equal(trim(res), trim(tgt), err_msg=msg)
class TestEvaluation(TestCase):
    # Legendre series coefficients of 1 + 2*x + 3*x**2, i.e. 2*P0 + 2*P1 + 2*P2
c1d = np.array([2., 2., 2.])
c2d = np.einsum('i,j->ij', c1d, c1d)
c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d)
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
y = polyval(x, [1., 2., 3.])
def test_legval(self):
#check empty input
assert_equal(leg.legval([], [1]).size, 0)
        #check normal input
x = np.linspace(-1, 1)
y = [polyval(x, c) for c in Llist]
for i in range(10):
msg = "At i=%d" % i
tgt = y[i]
res = leg.legval(x, [0]*i + [1])
assert_almost_equal(res, tgt, err_msg=msg)
#check that shape is preserved
for i in range(3):
dims = [2]*i
x = np.zeros(dims)
assert_equal(leg.legval(x, [1]).shape, dims)
assert_equal(leg.legval(x, [1, 0]).shape, dims)
assert_equal(leg.legval(x, [1, 0, 0]).shape, dims)
def test_legval2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test exceptions
assert_raises(ValueError, leg.legval2d, x1, x2[:2], self.c2d)
#test values
tgt = y1*y2
res = leg.legval2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = leg.legval2d(z, z, self.c2d)
assert_(res.shape == (2, 3))
def test_legval3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test exceptions
assert_raises(ValueError, leg.legval3d, x1, x2, x3[:2], self.c3d)
#test values
tgt = y1*y2*y3
res = leg.legval3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = leg.legval3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3))
def test_leggrid2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test values
tgt = np.einsum('i,j->ij', y1, y2)
res = leg.leggrid2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = leg.leggrid2d(z, z, self.c2d)
assert_(res.shape == (2, 3)*2)
def test_leggrid3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test values
tgt = np.einsum('i,j,k->ijk', y1, y2, y3)
res = leg.leggrid3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = leg.leggrid3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3)*3)
class TestIntegral(TestCase):
def test_legint(self):
# check exceptions
assert_raises(ValueError, leg.legint, [0], .5)
assert_raises(ValueError, leg.legint, [0], -1)
assert_raises(ValueError, leg.legint, [0], 1, [0, 0])
# test integration of zero polynomial
for i in range(2, 5):
k = [0]*(i - 2) + [1]
res = leg.legint([0], m=i, k=k)
assert_almost_equal(res, [0, 1])
# check single integration with integration constant
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [1/scl]
legpol = leg.poly2leg(pol)
legint = leg.legint(legpol, m=1, k=[i])
res = leg.leg2poly(legint)
assert_almost_equal(trim(res), trim(tgt))
# check single integration with integration constant and lbnd
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
legpol = leg.poly2leg(pol)
legint = leg.legint(legpol, m=1, k=[i], lbnd=-1)
assert_almost_equal(leg.legval(-1, legint), i)
# check single integration with integration constant and scaling
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [2/scl]
legpol = leg.poly2leg(pol)
legint = leg.legint(legpol, m=1, k=[i], scl=2)
res = leg.leg2poly(legint)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with default k
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = leg.legint(tgt, m=1)
res = leg.legint(pol, m=j)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with defined k
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = leg.legint(tgt, m=1, k=[k])
res = leg.legint(pol, m=j, k=list(range(j)))
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with lbnd
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = leg.legint(tgt, m=1, k=[k], lbnd=-1)
res = leg.legint(pol, m=j, k=list(range(j)), lbnd=-1)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with scaling
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = leg.legint(tgt, m=1, k=[k], scl=2)
res = leg.legint(pol, m=j, k=list(range(j)), scl=2)
assert_almost_equal(trim(res), trim(tgt))
def test_legint_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([leg.legint(c) for c in c2d.T]).T
res = leg.legint(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([leg.legint(c) for c in c2d])
res = leg.legint(c2d, axis=1)
assert_almost_equal(res, tgt)
tgt = np.vstack([leg.legint(c, k=3) for c in c2d])
res = leg.legint(c2d, k=3, axis=1)
assert_almost_equal(res, tgt)
class TestDerivative(TestCase):
def test_legder(self):
# check exceptions
assert_raises(ValueError, leg.legder, [0], .5)
assert_raises(ValueError, leg.legder, [0], -1)
        # check that zeroth derivative does nothing
for i in range(5):
tgt = [0]*i + [1]
res = leg.legder(tgt, m=0)
assert_equal(trim(res), trim(tgt))
# check that derivation is the inverse of integration
for i in range(5):
for j in range(2, 5):
tgt = [0]*i + [1]
res = leg.legder(leg.legint(tgt, m=j), m=j)
assert_almost_equal(trim(res), trim(tgt))
# check derivation with scaling
for i in range(5):
for j in range(2, 5):
tgt = [0]*i + [1]
res = leg.legder(leg.legint(tgt, m=j, scl=2), m=j, scl=.5)
assert_almost_equal(trim(res), trim(tgt))
def test_legder_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([leg.legder(c) for c in c2d.T]).T
res = leg.legder(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([leg.legder(c) for c in c2d])
res = leg.legder(c2d, axis=1)
assert_almost_equal(res, tgt)
class TestVander(TestCase):
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
def test_legvander(self):
# check for 1d x
x = np.arange(3)
v = leg.legvander(x, 3)
assert_(v.shape == (3, 4))
for i in range(4):
coef = [0]*i + [1]
assert_almost_equal(v[..., i], leg.legval(x, coef))
# check for 2d x
x = np.array([[1, 2], [3, 4], [5, 6]])
v = leg.legvander(x, 3)
assert_(v.shape == (3, 2, 4))
for i in range(4):
coef = [0]*i + [1]
assert_almost_equal(v[..., i], leg.legval(x, coef))
def test_legvander2d(self):
# also tests polyval2d for non-square coefficient array
x1, x2, x3 = self.x
c = np.random.random((2, 3))
van = leg.legvander2d(x1, x2, [1, 2])
tgt = leg.legval2d(x1, x2, c)
res = np.dot(van, c.flat)
assert_almost_equal(res, tgt)
# check shape
van = leg.legvander2d([x1], [x2], [1, 2])
assert_(van.shape == (1, 5, 6))
def test_legvander3d(self):
# also tests polyval3d for non-square coefficient array
x1, x2, x3 = self.x
c = np.random.random((2, 3, 4))
van = leg.legvander3d(x1, x2, x3, [1, 2, 3])
tgt = leg.legval3d(x1, x2, x3, c)
res = np.dot(van, c.flat)
assert_almost_equal(res, tgt)
# check shape
van = leg.legvander3d([x1], [x2], [x3], [1, 2, 3])
assert_(van.shape == (1, 5, 24))
class TestFitting(TestCase):
def test_legfit(self):
def f(x):
return x*(x - 1)*(x - 2)
# Test exceptions
assert_raises(ValueError, leg.legfit, [1], [1], -1)
assert_raises(TypeError, leg.legfit, [[1]], [1], 0)
assert_raises(TypeError, leg.legfit, [], [1], 0)
assert_raises(TypeError, leg.legfit, [1], [[[1]]], 0)
assert_raises(TypeError, leg.legfit, [1, 2], [1], 0)
assert_raises(TypeError, leg.legfit, [1], [1, 2], 0)
assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[[1]])
assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[1, 1])
# Test fit
x = np.linspace(0, 2)
y = f(x)
#
coef3 = leg.legfit(x, y, 3)
assert_equal(len(coef3), 4)
assert_almost_equal(leg.legval(x, coef3), y)
#
coef4 = leg.legfit(x, y, 4)
assert_equal(len(coef4), 5)
assert_almost_equal(leg.legval(x, coef4), y)
#
coef2d = leg.legfit(x, np.array([y, y]).T, 3)
assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
# test weighting
w = np.zeros_like(x)
yw = y.copy()
w[1::2] = 1
y[0::2] = 0
wcoef3 = leg.legfit(x, yw, 3, w=w)
assert_almost_equal(wcoef3, coef3)
#
wcoef2d = leg.legfit(x, np.array([yw, yw]).T, 3, w=w)
assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
        # test scaling with complex x values whose squares sum to zero
x = [1, 1j, -1, -1j]
assert_almost_equal(leg.legfit(x, x, 1), [0, 1])
class TestCompanion(TestCase):
def test_raises(self):
assert_raises(ValueError, leg.legcompanion, [])
assert_raises(ValueError, leg.legcompanion, [1])
def test_dimensions(self):
for i in range(1, 5):
coef = [0]*i + [1]
assert_(leg.legcompanion(coef).shape == (i, i))
def test_linear_root(self):
assert_(leg.legcompanion([1, 2])[0, 0] == -.5)
class TestGauss(TestCase):
def test_100(self):
x, w = leg.leggauss(100)
# test orthogonality. Note that the results need to be normalized,
# otherwise the huge values that can arise from fast growing
# functions like Laguerre can be very confusing.
v = leg.legvander(x, 99)
vv = np.dot(v.T * w, v)
vd = 1/np.sqrt(vv.diagonal())
vv = vd[:, None] * vv * vd
assert_almost_equal(vv, np.eye(100))
# check that the integral of 1 is correct
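        # (the weights of an n-point Gauss-Legendre rule sum to the length of
        # the interval [-1, 1], i.e. 2)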
tgt = 2.0
assert_almost_equal(w.sum(), tgt)
class TestMisc(TestCase):
def test_legfromroots(self):
res = leg.legfromroots([])
assert_almost_equal(trim(res), [1])
for i in range(1, 5):
roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])
pol = leg.legfromroots(roots)
res = leg.legval(roots, pol)
tgt = 0
assert_(len(pol) == i + 1)
assert_almost_equal(leg.leg2poly(pol)[-1], 1)
assert_almost_equal(res, tgt)
def test_legroots(self):
assert_almost_equal(leg.legroots([1]), [])
assert_almost_equal(leg.legroots([1, 2]), [-.5])
for i in range(2, 5):
tgt = np.linspace(-1, 1, i)
res = leg.legroots(leg.legfromroots(tgt))
assert_almost_equal(trim(res), trim(tgt))
def test_legtrim(self):
coef = [2, -1, 1, 0]
# Test exceptions
assert_raises(ValueError, leg.legtrim, coef, -1)
# Test results
assert_equal(leg.legtrim(coef), coef[:-1])
assert_equal(leg.legtrim(coef, 1), coef[:-3])
assert_equal(leg.legtrim(coef, 2), [0])
def test_legline(self):
assert_equal(leg.legline(3, 4), [3, 4])
def test_leg2poly(self):
for i in range(10):
assert_almost_equal(leg.leg2poly([0]*i + [1]), Llist[i])
def test_poly2leg(self):
for i in range(10):
assert_almost_equal(leg.poly2leg(Llist[i]), [0]*i + [1])
def test_weight(self):
x = np.linspace(-1, 1, 11)
tgt = 1.
res = leg.legweight(x)
assert_almost_equal(res, tgt)
if __name__ == "__main__":
run_module_suite()
|
nojhan/pyxshell
|
refs/heads/master
|
src/pyxshell/pipeline.py
|
1
|
# -*- coding: utf-8 -*-
from functools import wraps
import itertools
class PipeLine(object):
"""
A coroutine wrapper which enables pipelining syntax.
:class:`PipeLine` allows you to flatten once-nested code just by wrapping
your generators. The class provides combinators in the form of operators,
allowing you to plug two generators together without having to nest lots of
function calls. For example::
>>> def summer(stdin):
... sum = 0
... for item in stdin:
... sum += item
... yield sum
>>> pipeline = PipeLine(lambda: iter([1, 2, 3, 4])) | PipeLine(summer)
>>> pipeline
<PipeLine: <lambda> | summer>
>>> for item in pipeline:
... print(item)
1
3
6
10
The yielded output of each generator in the chain becomes the input for the
next. The rules for writing a pipeline function are simple:
:class:`PipeLine` requires a callable which accepts a single argument (the
input), and returns an iterator. The only exception is the first part of
the pipeline, which should accept no arguments (as there will be no input).
To create pipeline functions, use the :func:`pipe` decorator::
>>> @pipe
... def my_generator():
... yield 1
... yield 2
... yield 3
>>> pl = my_generator()
>>> pl
<PipeLine: my_generator>
>>> for item in pl:
... print(item)
1
2
3
If your pipeline accepts input, an iterator will be provided as the first
argument to the function::
>>> @pipe
... def add_one(input):
... for item in input:
... yield item + 1
>>> pl = my_generator() | add_one()
>>> pl
<PipeLine: my_generator | add_one>
>>> for item in pl:
... print(item)
2
3
4
Even with input, your functions can still accept other parameters::
>>> @pipe
... def adder(input, amount):
... for item in input:
... yield item + amount
>>> pl = my_generator() | adder(3)
>>> pl
<PipeLine: my_generator | adder>
>>> for item in pl:
... print(item)
4
5
6
Some operators are overridden to provide pipeline combinators (methods
which take multiple pipelines and return a new pipeline). For example,
multiplying two pipelines gets you their cross product::
>>> pl = my_generator() | (adder(3) * adder(6))
>>> pl
<PipeLine: my_generator | adder * adder>
>>> for item in pl:
... print(item)
(4, 7)
(4, 8)
(4, 9)
(5, 7)
(5, 8)
(5, 9)
(6, 7)
(6, 8)
(6, 9)
Adding two pipelines will chain the same input through both::
>>> pl = my_generator() | (adder(3) + adder(12))
>>> pl
<PipeLine: my_generator | adder + adder>
>>> for item in pl:
... print(item)
4
5
6
13
14
15
"""
__slots__ = ('coro_func',)
def __init__(self, coro_func):
self.coro_func = coro_func
@property
def __name__(self):
return self.coro_func.__name__
def __repr__(self):
return '<PipeLine: %s>' % getattr(self.coro_func, '__name__',
repr(self.coro_func))
# in self
def __iter__(self):
return self.coro_func()
# self | target
def __or__(self, target):
"""
Connect a pipe to something.
If the target is a callable, try to call it as a pipe.
NOTE: this does not work when connecting data structures to callable,
like in: `range(5) | tee`,
        because we cannot overload the | operator on plain functions (FIXME?).
>>> @pipe
... def echo(i):
... yield i
...
>>> @pipe
... def tee(l):
... for i in l:
... print(i)
... yield i
...
>>> list(echo("Brian") | tee())
Brian
['Brian']
>>> list(echo("Brian") | tee)
Brian
['Brian']
"""
# if target is a callable, the user may ask for a call to a PipeLine
# without parenthesis, like in: `echo("Brian") | tee`
if hasattr(target, '__call__'):
# thus, try to call the target on the input pipe
return target(iter(self))
else:
# if it is not a callable, it may be a pipeable
# just connect it as a pipe
return target.__ror__(self)
# source | self
def __ror__(self, source):
r"""
Connect something to a pipe so that one's output becomes
the other's input.
A simple example::
>>> try:
... from itertools import imap
... except ImportError:
... imap = map
>>> p = (PipeLine(lambda: iter([1, 2, 3, 4])) |
... PipeLine(lambda stdin: imap(lambda x: x + 3, stdin)))
>>> p
<PipeLine: <lambda> | <lambda>>
>>> list(p)
[4, 5, 6, 7]
"""
def pipe():
return self.coro_func(iter(source))
pipe.__name__ = '%s | %s' % (
getattr(source, '__name__', repr(source)),
getattr(self.coro_func, '__name__', repr(self.coro_func)))
return PipeLine(pipe)
# self > target
def __gt__(self, target):
"""
Redirect the generator output to a file or a variable.
Erase the existing content of the target.
>>> @pipe
... def echo(i):
... yield i
...
>>> import os
>>> import sys
>>> echo("Brian") > sys.stdout
Brian
>>> d=[] ; echo("Brian") > d ; print(d)
['Brian']
>>> echo("Brian") > os.devnull
"""
if isinstance(target, str):
# w = erase existing content
with open(target, "w") as fd:
for line in iter(self):
fd.write(line)
elif hasattr(target, "write"):
for line in iter(self):
target.write(line)
elif hasattr(target, "append"):
# empty the target
del target[:]
for line in iter(self):
target.append(line)
else:
raise TypeError
# self >> target
def __rshift__(self, target):
"""
Append the generator output to a file or a variable.
Do not erase the existing content.
        WARNING: this overloads a binary operator that has higher precedence
        than |. You should thus put parentheses around the generator sequence
        before using it.
>>> @pipe
... def no(stdin):
... for line in stdin:
... yield line
...
>>> import os
>>> import sys
>>> (["Bri", "an"] | no()) >> sys.stdout
Brian
>>> d = []
>>> (["Bri", "an"] | no()) >> d
>>> print(d)
['Bri', 'an']
>>> (["Bri", "an"] | no()) >> os.devnull
>>> try:
... ["Bri", "an"] | no() >> sys.stdout
... except Exception as error:
... print(error.__class__.__name__)
TypeError
"""
if isinstance(target, str):
# a = append to file
with open(target, "a") as fd:
for line in iter(self):
fd.write(line)
elif hasattr(target, "write"):
for line in iter(self):
target.write(line)
elif hasattr(target, "append"):
for line in iter(self):
target.append(line)
else:
raise TypeError
# self * other
def __mul__(self, other):
"""
Yield the cross product between two alternative pipes.
A simple example::
>>> @pipe
... def echo(values):
... for x in values:
... yield x
>>> list(echo([0, 1]) * echo([9, 10]))
[(0, 9), (0, 10), (1, 9), (1, 10)]
"""
def product(stdin=None):
if stdin is None:
return itertools.product(self, other)
stdin1, stdin2 = itertools.tee(stdin, 2)
return itertools.product((stdin1 | self), (stdin2 | other))
product.__name__ = '%s * %s' % (
getattr(self.coro_func, '__name__', repr(self.coro_func)),
getattr(other, '__name__', repr(other)))
return pipe(product)()
# self + other
def __add__(self, other):
"""
Yield the chained output of two alternative pipes.
Example::
>>> @pipe
... def echo(values):
... for x in values:
... yield x
>>> list(echo([1, 2, 3]) + echo([4, 5, 6]))
[1, 2, 3, 4, 5, 6]
"""
def concat(stdin=None):
if stdin is None:
return itertools.chain(self, other)
stdin1, stdin2 = itertools.tee(stdin, 2)
return itertools.chain((stdin1 | self), (stdin2 | other))
concat.__name__ = '%s + %s' % (
getattr(self.coro_func, '__name__', repr(self.coro_func)),
getattr(other, '__name__', repr(other)))
return pipe(concat)()
def pipe(func):
"""
Wrap a function as a pipeline.
>>> @pipe
... def printer(stdin, outfile=None):
... for item in stdin:
... print(item)
... yield item
...
>>> @pipe
... def echo(*values):
... for value in values:
... yield value
...
>>> p = printer()
>>> print(p)
<PipeLine: printer>
>>> p = echo(1, 2, 3) | p
>>> print(p)
<PipeLine: echo | printer>
>>> output = list(p)
1
2
3
>>> output
[1, 2, 3]
"""
@wraps(func)
def wrapper(*args, **kwargs):
@wraps(func)
def coro_func(stdin=None):
if stdin is None:
return func(*args, **kwargs)
return func(stdin, *args, **kwargs)
return PipeLine(coro_func)
return wrapper
|
KiChjang/servo
|
refs/heads/master
|
components/script/dom/bindings/codegen/parser/tests/test_optional_constraints.py
|
170
|
def WebIDLTest(parser, harness):
threw = False
try:
parser.parse("""
interface OptionalConstraints1 {
void foo(optional byte arg1, byte arg2);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(not threw,
"Should not have thrown on non-optional argument following "
"optional argument.")
parser = parser.reset()
parser.parse("""
interface OptionalConstraints2 {
void foo(optional byte arg1 = 1, optional byte arg2 = 2,
optional byte arg3, optional byte arg4 = 4,
optional byte arg5, optional byte arg6 = 9);
};
""")
results = parser.finish()
args = results[0].members[0].signatures()[0][1]
harness.check(len(args), 6, "Should have 6 arguments")
harness.check(args[5].defaultValue.value, 9,
"Should have correct default value")
|
tell10glu/libgdx
|
refs/heads/master
|
extensions/gdx-freetype/jni/freetype-2.5.5/src/tools/docmaker/docmaker.py
|
146
|
#!/usr/bin/env python
#
# docmaker.py
#
# Convert source code markup to HTML documentation.
#
# Copyright 2002, 2004, 2008, 2013, 2014 by
# David Turner.
#
# This file is part of the FreeType project, and may only be used,
# modified, and distributed under the terms of the FreeType project
# license, LICENSE.TXT. By continuing to use, modify, or distribute
# this file you indicate that you have read the license and
# understand and accept it fully.
#
# This program is a re-write of the original DocMaker tool used to generate
# the API Reference of the FreeType font rendering engine by converting
# in-source comments into structured HTML.
#
# This new version is capable of outputting XML data as well as accepting
# more liberal formatting options. It also uses regular expression matching
# and substitution to speed up operation significantly.
#
from sources import *
from content import *
from utils import *
from formatter import *
from tohtml import *
import utils
import sys, os, time, string, glob, getopt
def usage():
print "\nDocMaker Usage information\n"
print " docmaker [options] file1 [file2 ...]\n"
print "using the following options:\n"
print " -h : print this page"
print " -t : set project title, as in '-t \"My Project\"'"
print " -o : set output directory, as in '-o mydir'"
print " -p : set documentation prefix, as in '-p ft2'"
print ""
print " --title : same as -t, as in '--title=\"My Project\"'"
print " --output : same as -o, as in '--output=mydir'"
print " --prefix : same as -p, as in '--prefix=ft2'"
def main( argv ):
"""Main program loop."""
global output_dir
try:
opts, args = getopt.getopt( sys.argv[1:],
"ht:o:p:",
["help", "title=", "output=", "prefix="] )
except getopt.GetoptError:
usage()
sys.exit( 2 )
if args == []:
usage()
sys.exit( 1 )
# process options
project_title = "Project"
project_prefix = None
output_dir = None
for opt in opts:
if opt[0] in ( "-h", "--help" ):
usage()
sys.exit( 0 )
if opt[0] in ( "-t", "--title" ):
project_title = opt[1]
if opt[0] in ( "-o", "--output" ):
utils.output_dir = opt[1]
if opt[0] in ( "-p", "--prefix" ):
project_prefix = opt[1]
check_output()
# create context and processor
source_processor = SourceProcessor()
content_processor = ContentProcessor()
# retrieve the list of files to process
file_list = make_file_list( args )
for filename in file_list:
source_processor.parse_file( filename )
content_processor.parse_sources( source_processor )
# process sections
content_processor.finish()
formatter = HtmlFormatter( content_processor,
project_title,
project_prefix )
formatter.toc_dump()
formatter.index_dump()
formatter.section_dump_all()
# if called from the command line
if __name__ == '__main__':
main( sys.argv )
# eof
|
IRI-Research/django
|
refs/heads/master
|
tests/admin_validation/models.py
|
192
|
"""
Tests of ModelAdmin validation logic.
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
class Album(models.Model):
title = models.CharField(max_length=150)
@python_2_unicode_compatible
class Song(models.Model):
title = models.CharField(max_length=150)
album = models.ForeignKey(Album)
original_release = models.DateField(editable=False)
class Meta:
ordering = ('title',)
def __str__(self):
return self.title
def readonly_method_on_model(self):
# does nothing
pass
class TwoAlbumFKAndAnE(models.Model):
album1 = models.ForeignKey(Album, related_name="album1_set")
album2 = models.ForeignKey(Album, related_name="album2_set")
e = models.CharField(max_length=1)
class Author(models.Model):
name = models.CharField(max_length=100)
class Book(models.Model):
name = models.CharField(max_length=100)
subtitle = models.CharField(max_length=100)
price = models.FloatField()
authors = models.ManyToManyField(Author, through='AuthorsBooks')
class AuthorsBooks(models.Model):
author = models.ForeignKey(Author)
book = models.ForeignKey(Book)
class State(models.Model):
name = models.CharField(max_length=15)
class City(models.Model):
state = models.ForeignKey(State)
|
rhertzog/librement
|
refs/heads/master
|
src/librement/profile/migrations/0006_auto__add_field_profile_rss_url.py
|
1
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Profile.rss_url'
db.add_column('profile_profile', 'rss_url',
self.gf('django.db.models.fields.CharField')(default='', max_length=200),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Profile.rss_url'
db.delete_column('profile_profile', 'rss_url')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'profile.profile': {
'Meta': {'object_name': 'Profile'},
'account_type': ('django.db.models.fields.IntegerField', [], {'default': 0}),
'address_1': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
'address_2': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
'biography': ('django.db.models.fields.TextField', [], {}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'country': ('django.db.models.fields.IntegerField', [], {'default': 215}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'organisation': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'picture_hash': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'rss_url': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['auth.User']"}),
'zipcode': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
}
}
complete_apps = ['profile']
|
lablup/sorna-client
|
refs/heads/master
|
src/ai/backend/client/cli/admin/resource_policies.py
|
1
|
import sys
import click
from tabulate import tabulate
from . import admin
from ...session import Session
from ..pretty import print_error, print_fail
@admin.command()
@click.option('-n', '--name', type=str, default=None,
help='Name of the resource policy.')
def resource_policy(name):
"""
    Show details about a keypair resource policy. When the `name` option is
    omitted, the resource policy for the current access_key is returned.
"""
fields = [
('Name', 'name'),
('Created At', 'created_at'),
('Default for Unspecified', 'default_for_unspecified'),
('Total Resource Slot', 'total_resource_slots'),
('Max Concurrent Sessions', 'max_concurrent_sessions'),
('Max Containers per Session', 'max_containers_per_session'),
('Max vFolder Count', 'max_vfolder_count'),
('Max vFolder Size', 'max_vfolder_size'),
        ('Idle Timeout', 'idle_timeout'),
('Allowed vFolder Hosts', 'allowed_vfolder_hosts'),
]
with Session() as session:
try:
rp = session.ResourcePolicy(session.config.access_key)
info = rp.info(name, fields=(item[1] for item in fields))
except Exception as e:
print_error(e)
sys.exit(1)
rows = []
if info is None:
print('No such resource policy.')
sys.exit(1)
for name, key in fields:
rows.append((name, info[key]))
print(tabulate(rows, headers=('Field', 'Value')))
@admin.group(invoke_without_command=True)
@click.pass_context
def resource_policies(ctx):
'''
List and manage resource policies.
(admin privilege required)
'''
if ctx.invoked_subcommand is not None:
return
fields = [
('Name', 'name'),
('Created At', 'created_at'),
('Default for Unspecified', 'default_for_unspecified'),
('Total Resource Slot', 'total_resource_slots'),
('Max Concurrent Sessions', 'max_concurrent_sessions'),
('Max Containers per Session', 'max_containers_per_session'),
('Max vFolder Count', 'max_vfolder_count'),
('Max vFolder Size', 'max_vfolder_size'),
        ('Idle Timeout', 'idle_timeout'),
('Allowed vFolder Hosts', 'allowed_vfolder_hosts'),
]
with Session() as session:
try:
items = session.ResourcePolicy.list(fields=(item[1] for item in fields))
except Exception as e:
print_error(e)
sys.exit(1)
if len(items) == 0:
print('There are no keypair resource policies.')
return
print(tabulate((item.values() for item in items),
headers=(item[0] for item in fields)))
@resource_policies.command()
@click.argument('name', type=str, default=None, metavar='NAME')
@click.option('--default-for-unspecified', type=str, default='UNLIMITED',
help='Default behavior for unspecified resources: '
'LIMITED, UNLIMITED')
@click.option('--total-resource-slots', type=str, default='{}',
help='Set total resource slots.')
@click.option('--max-concurrent-sessions', type=int, default=30,
help='Number of maximum concurrent sessions.')
@click.option('--max-containers-per-session', type=int, default=1,
help='Number of maximum containers per session.')
@click.option('--max-vfolder-count', type=int, default=10,
help='Number of maximum virtual folders allowed.')
@click.option('--max-vfolder-size', type=int, default=0,
help='Maximum virtual folder size (future plan).')
@click.option('--idle-timeout', type=int, default=1800,
              help='The maximum period of time allowed for kernels to wait '
                   'for further requests.')
# @click.option('--allowed-vfolder-hosts', type=click.Tuple(str), default=['local'],
# help='Locations to create virtual folders.')
@click.option('--allowed-vfolder-hosts', default=['local'],
help='Locations to create virtual folders.')
def add(name, default_for_unspecified, total_resource_slots, max_concurrent_sessions,
max_containers_per_session, max_vfolder_count, max_vfolder_size,
idle_timeout, allowed_vfolder_hosts):
'''
Add a new keypair resource policy.
NAME: NAME of a new keypair resource policy.
'''
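    # Hypothetical invocation sketch (the exact CLI entry point depends on how
    # the client is installed), using the option names declared above:
    #   ... admin resource-policies add mypolicy --max-concurrent-sessions 5 \
    #       --max-vfolder-count 20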
with Session() as session:
try:
data = session.ResourcePolicy.create(
name,
default_for_unspecified=default_for_unspecified,
total_resource_slots=total_resource_slots,
max_concurrent_sessions=max_concurrent_sessions,
max_containers_per_session=max_containers_per_session,
max_vfolder_count=max_vfolder_count,
max_vfolder_size=max_vfolder_size,
idle_timeout=idle_timeout,
allowed_vfolder_hosts=allowed_vfolder_hosts,
)
except Exception as e:
print_error(e)
sys.exit(1)
if not data['ok']:
print_fail('KeyPair Resource Policy creation has failed: {0}'
.format(data['msg']))
sys.exit(1)
item = data['resource_policy']
print('Keypair resource policy ' + item['name'] + ' is created.')
@resource_policies.command()
@click.argument('name', type=str, default=None, metavar='NAME')
@click.option('--default-for-unspecified', type=str,
help='Default behavior for unspecified resources: '
'LIMITED, UNLIMITED')
@click.option('--total-resource-slots', type=str,
help='Set total resource slots.')
@click.option('--max-concurrent-sessions', type=int,
help='Number of maximum concurrent sessions.')
@click.option('--max-containers-per-session', type=int,
help='Number of maximum containers per session.')
@click.option('--max-vfolder-count', type=int,
help='Number of maximum virtual folders allowed.')
@click.option('--max-vfolder-size', type=int,
help='Maximum virtual folder size (future plan).')
@click.option('--idle-timeout', type=int,
              help='The maximum period of time allowed for kernels to wait '
                   'for further requests.')
@click.option('--allowed-vfolder-hosts', help='Locations to create virtual folders.')
def update(name, default_for_unspecified, total_resource_slots,
max_concurrent_sessions, max_containers_per_session, max_vfolder_count,
max_vfolder_size, idle_timeout, allowed_vfolder_hosts):
"""
Update an existing keypair resource policy.
NAME: NAME of a keypair resource policy to update.
"""
with Session() as session:
try:
data = session.ResourcePolicy.update(
name,
default_for_unspecified=default_for_unspecified,
total_resource_slots=total_resource_slots,
max_concurrent_sessions=max_concurrent_sessions,
max_containers_per_session=max_containers_per_session,
max_vfolder_count=max_vfolder_count,
max_vfolder_size=max_vfolder_size,
idle_timeout=idle_timeout,
allowed_vfolder_hosts=allowed_vfolder_hosts,
)
except Exception as e:
print_error(e)
sys.exit(1)
if not data['ok']:
            print_fail('KeyPair Resource Policy update has failed: {0}'
                       .format(data['msg']))
sys.exit(1)
print('Update succeeded.')
@resource_policies.command()
@click.argument('name', type=str, default=None, metavar='NAME')
def delete(name):
"""
Delete a keypair resource policy.
NAME: NAME of a keypair resource policy to delete.
"""
with Session() as session:
if input('Are you sure? (y/n): ').lower().strip()[:1] != 'y':
print('Canceled.')
sys.exit(1)
try:
data = session.ResourcePolicy.delete(name)
except Exception as e:
print_error(e)
sys.exit(1)
if not data['ok']:
print_fail('KeyPair Resource Policy deletion has failed: {0}'
.format(data['msg']))
sys.exit(1)
print('Resource policy ' + name + ' is deleted.')
|
fnouama/intellij-community
|
refs/heads/master
|
python/lib/Lib/code.py
|
108
|
"""Utilities needed to emulate Python's interactive interpreter.
"""
# Inspired by similar code by Jeff Epler and Fredrik Lundh.
import sys
import traceback
from codeop import CommandCompiler, compile_command
__all__ = ["InteractiveInterpreter", "InteractiveConsole", "interact",
"compile_command"]
def softspace(file, newvalue):
oldvalue = 0
try:
oldvalue = file.softspace
except AttributeError:
pass
try:
file.softspace = newvalue
except (AttributeError, TypeError):
# "attribute-less object" or "read-only attributes"
pass
return oldvalue
class InteractiveInterpreter:
"""Base class for InteractiveConsole.
This class deals with parsing and interpreter state (the user's
namespace); it doesn't deal with input buffering or prompting or
input file naming (the filename is always passed in explicitly).
"""
def __init__(self, locals=None):
"""Constructor.
The optional 'locals' argument specifies the dictionary in
which code will be executed; it defaults to a newly created
dictionary with key "__name__" set to "__console__" and key
"__doc__" set to None.
"""
if locals is None:
locals = {"__name__": "__console__", "__doc__": None}
self.locals = locals
self.compile = CommandCompiler()
def runsource(self, source, filename="<input>", symbol="single"):
"""Compile and run some source in the interpreter.
Arguments are as for compile_command().
        One of several things can happen:
1) The input is incorrect; compile_command() raised an
exception (SyntaxError or OverflowError). A syntax traceback
will be printed by calling the showsyntaxerror() method.
2) The input is incomplete, and more input is required;
compile_command() returned None. Nothing happens.
3) The input is complete; compile_command() returned a code
object. The code is executed by calling self.runcode() (which
also handles run-time exceptions, except for SystemExit).
The return value is True in case 2, False in the other cases (unless
an exception is raised). The return value can be used to
decide whether to use sys.ps1 or sys.ps2 to prompt the next
line.
"""
try:
code = self.compile(source, filename, symbol)
except (OverflowError, SyntaxError, ValueError):
# Case 1
self.showsyntaxerror(filename)
return False
if code is None:
# Case 2
return True
# Case 3
self.runcode(code)
return False
def runcode(self, code):
"""Execute a code object.
When an exception occurs, self.showtraceback() is called to
display a traceback. All exceptions are caught except
SystemExit, which is reraised.
A note about KeyboardInterrupt: this exception may occur
elsewhere in this code, and may not always be caught. The
caller should be prepared to deal with it.
"""
try:
exec code in self.locals
except SystemExit:
raise
except:
self.showtraceback()
else:
if softspace(sys.stdout, 0):
print
def showsyntaxerror(self, filename=None):
"""Display the syntax error that just occurred.
This doesn't display a stack trace because there isn't one.
If a filename is given, it is stuffed in the exception instead
of what was there before (because Python's parser always uses
"<string>" when reading from a string).
The output is written by self.write(), below.
"""
type, value, sys.last_traceback = sys.exc_info()
sys.last_type = type
sys.last_value = value
if filename and type is SyntaxError:
# Work hard to stuff the correct filename in the exception
try:
msg, (dummy_filename, lineno, offset, line) = value
except:
# Not the format we expect; leave it alone
pass
else:
# Stuff in the right filename
value = SyntaxError(msg, (filename, lineno, offset, line))
sys.last_value = value
list = traceback.format_exception_only(type, value)
map(self.write, list)
def showtraceback(self):
"""Display the exception that just occurred.
We remove the first stack item because it is our own code.
The output is written by self.write(), below.
"""
try:
type, value, tb = sys.exc_info()
sys.last_type = type
sys.last_value = value
sys.last_traceback = tb
tblist = traceback.extract_tb(tb)
del tblist[:1]
list = traceback.format_list(tblist)
if list:
list.insert(0, "Traceback (most recent call last):\n")
list[len(list):] = traceback.format_exception_only(type, value)
finally:
tblist = tb = None
map(self.write, list)
def write(self, data):
"""Write a string.
The base implementation writes to sys.stderr; a subclass may
replace this with a different implementation.
"""
sys.stderr.write(data)
class InteractiveConsole(InteractiveInterpreter):
"""Closely emulate the behavior of the interactive Python interpreter.
This class builds on InteractiveInterpreter and adds prompting
using the familiar sys.ps1 and sys.ps2, and input buffering.
"""
def __init__(self, locals=None, filename="<console>"):
"""Constructor.
The optional locals argument will be passed to the
InteractiveInterpreter base class.
The optional filename argument should specify the (file)name
of the input stream; it will show up in tracebacks.
"""
InteractiveInterpreter.__init__(self, locals)
self.filename = filename
self.resetbuffer()
def resetbuffer(self):
"""Reset the input buffer."""
self.buffer = []
def interact(self, banner=None):
"""Closely emulate the interactive Python console.
        The optional banner argument specifies the banner to print
before the first interaction; by default it prints a banner
similar to the one printed by the real Python interpreter,
followed by the current class name in parentheses (so as not
to confuse this with the real interpreter -- since it's so
close!).
"""
try:
sys.ps1
except AttributeError:
sys.ps1 = ">>> "
try:
sys.ps2
except AttributeError:
sys.ps2 = "... "
cprt = 'Type "help", "copyright", "credits" or "license" for more information.'
if banner is None:
self.write("Python %s on %s\n%s\n(%s)\n" %
(sys.version, sys.platform, cprt,
self.__class__.__name__))
else:
self.write("%s\n" % str(banner))
more = 0
while 1:
try:
if more:
prompt = sys.ps2
else:
prompt = sys.ps1
try:
line = self.raw_input(prompt)
except EOFError:
self.write("\n")
break
else:
more = self.push(line)
except KeyboardInterrupt:
self.write("\nKeyboardInterrupt\n")
self.resetbuffer()
more = 0
def push(self, line):
"""Push a line to the interpreter.
The line should not have a trailing newline; it may have
internal newlines. The line is appended to a buffer and the
interpreter's runsource() method is called with the
concatenated contents of the buffer as source. If this
indicates that the command was executed or invalid, the buffer
is reset; otherwise, the command is incomplete, and the buffer
is left as it was after the line was appended. The return
value is 1 if more input is required, 0 if the line was dealt
with in some way (this is the same as runsource()).
"""
self.buffer.append(line)
source = "\n".join(self.buffer)
more = self.runsource(source, self.filename)
if not more:
self.resetbuffer()
return more
def raw_input(self, prompt=""):
"""Write a prompt and read a line.
The returned line does not include the trailing newline.
When the user enters the EOF key sequence, EOFError is raised.
The base implementation uses the built-in function
raw_input(); a subclass may replace this with a different
implementation.
"""
return raw_input(prompt)
def interact(banner=None, readfunc=None, local=None):
"""Closely emulate the interactive Python interpreter.
This is a backwards compatible interface to the InteractiveConsole
class. When readfunc is not specified, it attempts to import the
readline module to enable GNU readline if it is available.
Arguments (all optional, all default to None):
banner -- passed to InteractiveConsole.interact()
readfunc -- if not None, replaces InteractiveConsole.raw_input()
local -- passed to InteractiveInterpreter.__init__()
"""
console = InteractiveConsole(local)
if readfunc is not None:
console.raw_input = readfunc
else:
try:
import readline
except ImportError:
pass
console.interact(banner)
if __name__ == '__main__':
import pdb
pdb.run("interact()\n")
|
xrmx/django
|
refs/heads/master
|
tests/migration_test_data_persistence/tests.py
|
368
|
from django.test import TestCase, TransactionTestCase
from .models import Book
class MigrationDataPersistenceTestCase(TransactionTestCase):
"""
Tests that data loaded in migrations is available if we set
serialized_rollback = True on TransactionTestCase
"""
available_apps = ["migration_test_data_persistence"]
serialized_rollback = True
def test_persistence(self):
self.assertEqual(
Book.objects.count(),
1,
)
class MigrationDataNormalPersistenceTestCase(TestCase):
"""
Tests that data loaded in migrations is available on TestCase
"""
def test_persistence(self):
self.assertEqual(
Book.objects.count(),
1,
)
|
movmov/cc
|
refs/heads/master
|
vendor/tornado/demos/blog/blog.py
|
5
|
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import markdown
import os.path
import re
import tornado.auth
import tornado.database
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import unicodedata
from tornado.options import define, options
define("port", default=8888, help="run on the given port", type=int)
define("mysql_host", default="127.0.0.1:3306", help="blog database host")
define("mysql_database", default="blog", help="blog database name")
define("mysql_user", default="blog", help="blog database user")
define("mysql_password", default="blog", help="blog database password")
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r"/", HomeHandler),
(r"/archive", ArchiveHandler),
(r"/feed", FeedHandler),
(r"/entry/([^/]+)", EntryHandler),
(r"/compose", ComposeHandler),
(r"/auth/login", AuthLoginHandler),
(r"/auth/logout", AuthLogoutHandler),
]
settings = dict(
blog_title=u"Tornado Blog",
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static"),
ui_modules={"Entry": EntryModule},
xsrf_cookies=True,
cookie_secret="11oETzKXQAGaYdkL5gEmGeJJFuYh7EQnp2XdTP1o/Vo=",
login_url="/auth/login",
)
tornado.web.Application.__init__(self, handlers, **settings)
# Have one global connection to the blog DB across all handlers
self.db = tornado.database.Connection(
host=options.mysql_host, database=options.mysql_database,
user=options.mysql_user, password=options.mysql_password)
class BaseHandler(tornado.web.RequestHandler):
@property
def db(self):
return self.application.db
def get_current_user(self):
user_id = self.get_secure_cookie("user")
if not user_id: return None
return self.db.get("SELECT * FROM authors WHERE id = %s", int(user_id))
class HomeHandler(BaseHandler):
def get(self):
entries = self.db.query("SELECT * FROM entries ORDER BY published "
"DESC LIMIT 5")
if not entries:
self.redirect("/compose")
return
self.render("home.html", entries=entries)
class EntryHandler(BaseHandler):
def get(self, slug):
entry = self.db.get("SELECT * FROM entries WHERE slug = %s", slug)
if not entry: raise tornado.web.HTTPError(404)
self.render("entry.html", entry=entry)
class ArchiveHandler(BaseHandler):
def get(self):
entries = self.db.query("SELECT * FROM entries ORDER BY published "
"DESC")
self.render("archive.html", entries=entries)
class FeedHandler(BaseHandler):
def get(self):
entries = self.db.query("SELECT * FROM entries ORDER BY published "
"DESC LIMIT 10")
self.set_header("Content-Type", "application/atom+xml")
self.render("feed.xml", entries=entries)
class ComposeHandler(BaseHandler):
@tornado.web.authenticated
def get(self):
id = self.get_argument("id", None)
entry = None
if id:
entry = self.db.get("SELECT * FROM entries WHERE id = %s", int(id))
self.render("compose.html", entry=entry)
@tornado.web.authenticated
def post(self):
id = self.get_argument("id", None)
title = self.get_argument("title")
text = self.get_argument("markdown")
html = markdown.markdown(text)
if id:
entry = self.db.get("SELECT * FROM entries WHERE id = %s", int(id))
if not entry: raise tornado.web.HTTPError(404)
slug = entry.slug
self.db.execute(
"UPDATE entries SET title = %s, markdown = %s, html = %s "
"WHERE id = %s", title, text, html, int(id))
else:
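            # Derive a URL slug from the title: strip accents, drop
            # punctuation, hyphenate the words, fall back to "entry", and
            # append "-2" until the slug is unique.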
slug = unicodedata.normalize("NFKD", title).encode(
"ascii", "ignore")
slug = re.sub(r"[^\w]+", " ", slug)
slug = "-".join(slug.lower().strip().split())
if not slug: slug = "entry"
while True:
e = self.db.get("SELECT * FROM entries WHERE slug = %s", slug)
if not e: break
slug += "-2"
self.db.execute(
"INSERT INTO entries (author_id,title,slug,markdown,html,"
"published) VALUES (%s,%s,%s,%s,%s,UTC_TIMESTAMP())",
self.current_user.id, title, slug, text, html)
self.redirect("/entry/" + slug)
class AuthLoginHandler(BaseHandler, tornado.auth.GoogleMixin):
@tornado.web.asynchronous
def get(self):
if self.get_argument("openid.mode", None):
self.get_authenticated_user(self.async_callback(self._on_auth))
return
self.authenticate_redirect()
def _on_auth(self, user):
if not user:
raise tornado.web.HTTPError(500, "Google auth failed")
author = self.db.get("SELECT * FROM authors WHERE email = %s",
user["email"])
if not author:
# Auto-create first author
any_author = self.db.get("SELECT * FROM authors LIMIT 1")
if not any_author:
author_id = self.db.execute(
"INSERT INTO authors (email,name) VALUES (%s,%s)",
user["email"], user["name"])
else:
self.redirect("/")
return
else:
author_id = author["id"]
self.set_secure_cookie("user", str(author_id))
self.redirect(self.get_argument("next", "/"))
class AuthLogoutHandler(BaseHandler):
def get(self):
self.clear_cookie("user")
self.redirect(self.get_argument("next", "/"))
class EntryModule(tornado.web.UIModule):
def render(self, entry):
return self.render_string("modules/entry.html", entry=entry)
def main():
tornado.options.parse_command_line()
http_server = tornado.httpserver.HTTPServer(Application())
http_server.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
|
webgeodatavore/django
|
refs/heads/master
|
tests/migrations/test_migrations_squashed_complex/2_auto.py
|
770
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("migrations", "1_auto")]
operations = [
migrations.RunPython(migrations.RunPython.noop)
]
|
mozilla/moztrap
|
refs/heads/master
|
moztrap/model/core/admin.py
|
5
|
from django.contrib import admin
from preferences.admin import PreferencesAdmin
from ..mtadmin import MTTabularInline, MTModelAdmin, TeamModelAdmin
from .models import Product, ProductVersion, CorePreferences, ApiKey
class ProductVersionInline(MTTabularInline):
model = ProductVersion
extra = 0
class ApiKeyAdmin(MTModelAdmin):
list_display = ["owner", "active", "key"]
list_filter = ["active"]
admin.site.register(Product, TeamModelAdmin, inlines=[ProductVersionInline])
admin.site.register(ProductVersion, TeamModelAdmin, list_filter=["product"])
admin.site.register(CorePreferences, PreferencesAdmin)
admin.site.register(ApiKey, ApiKeyAdmin)
|
gvrossom/ants
|
refs/heads/master
|
src/profiles/signals.py
|
73
|
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.conf import settings
import logging
from . import models
logger = logging.getLogger("project")
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_profile_handler(sender, instance, created, **kwargs):
if not created:
return
# Create the profile object, only if it is newly created
profile = models.Profile(user=instance)
profile.save()
logger.info('New user profile for {} created'.format(instance))
|
merc-devel/merc
|
refs/heads/master
|
merc/__main__.py
|
1
|
import merc.application
merc.application.main()
|
pymedusa/SickRage
|
refs/heads/master
|
lib/pkg_resources/_vendor/__init__.py
|
12133432
| |
M3nin0/supreme-broccoli
|
refs/heads/master
|
Web/Flask/site_/lib/python3.5/site-packages/pkg_resources/_vendor/__init__.py
|
12133432
| |
rlugojr/rekall
|
refs/heads/master
|
rekall-agent/rekall_agent/locations/cloud.py
|
1
|
# Rekall Memory Forensics
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Author: Michael Cohen scudette@google.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""Location handlers for the Cloud.
This module provides the ability to write to Google Cloud Storage in various
ways.
"""
import base64
import contextlib
import json
import gzip
import os
import rfc822
import StringIO
import urllib
import tempfile
import time
from wsgiref import handlers
import arrow
import httplib2
import requests
from requests import adapters
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.backends import openssl
from cryptography.hazmat.primitives import serialization
from oauth2client import service_account as service_account_module
from rekall import utils
from rekall_agent import common
from rekall_agent import location
from rekall_agent import serializer
__author__ = "Michael Cohen <scudette@google.com>"
MAX_BUFF_SIZE = 10*1024*1024
class ServiceAccount(common.AgentConfigMixin, serializer.SerializedObject):
"""A GCS service account is an entity with delegation privileges.
A Service account is used for the creation of GCSSignedURLLocation and
GCSSignedPolicyLocation. Both of these Location objects allow the possessor
of these to upload files to specific buckets.
You can obtain a service account from the Google Cloud Console:
1) Select IAM and admin, Service accounts -> Create Service Account.
2) Select Role: "Storage Object Admin".
3) Select "Furnish a new private key" and export to JSON.
Store the json file and provide the path to it to the
agent_server_initialize_gcs plugin.
"""
schema = [
dict(name="type",
doc="The type of account (should be 'service_account')"),
dict(name="private_key", hidden=True),
dict(name="client_email"),
]
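    # _sign_blob() signs data with the service account's RSA private key using
    # PKCS#1 v1.5 padding and SHA-256, the scheme Google Cloud Storage expects
    # for signed URLs and signed policy documents.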
def _sign_blob(self, blob):
key = serialization.load_pem_private_key(
str(self.private_key), password=None, backend=openssl.backend)
signer = key.signer(padding.PKCS1v15(), hashes.SHA256())
signer.update(str(blob))
return signer.finalize()
_scopes = ["https://www.googleapis.com/auth/devstorage.full_control"]
_oauth_token = None
_oauth_token_age = 0
@utils.safe_property
def oauth_token(self):
# The google api and oauth2client libraries use httplib2 instead of
# requests. Unfortunately httplib2 is terrible (not thread safe, no
# streaming interface) so we just grab the access_token from the
# credentials object and use it directly in the requests library anyway.
max_lifetime = (service_account_module.ServiceAccountCredentials.
MAX_TOKEN_LIFETIME_SECS)
# Refresh token at least this often.
if (self._oauth_token is None or
self._oauth_token_age < time.time() - max_lifetime / 2):
credentials = (service_account_module.
ServiceAccountCredentials.
from_json_keyfile_dict(
self.to_primitive(False),
scopes=self._scopes))
            # It's OK to use httplib2 just for refreshing the tokens.
http = httplib2.Http()
credentials.refresh(http)
self._oauth_token = credentials.access_token
self._oauth_token_age = time.time()
return self._oauth_token
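    # The token is consumed by the requests based code paths as a plain
    # bearer header rather than through httplib2, e.g. (see
    # GCSOAuth2BasedLocation._get_parameters below):
    #
    #   headers["Authorization"] = "Bearer " + service_account.oauth_token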
def create_oauth_location(self, path="", bucket=None, public=False):
# If the bucket is not specified take it from the server's config.
if bucket is None:
bucket = self._config.server.bucket
headers = GCSHeaders(session=self._session)
if public:
headers.SetMember("x-goog-acl", "public-read")
return GCSOAuth2BasedLocation.from_keywords(
session=self._session, bucket=bucket, path=path,
headers=headers)
def create_signed_policy_location(self, expiration=None, path_prefix=None,
bucket=None, path_template=None):
"""Generate a GCSSignedPolicyLocation object.
        The generated Location object grants its possessor the required ACL
        rights for all paths starting with the specified prefix. Note that
GCSSignedPolicyLocation is only useful for writing.
https://cloud.google.com/storage/docs/xml-api/post-object#policydocument
"""
if expiration is None:
expiration = int(time.time()) + 60 * 60
# If the bucket is not specified take it from the server's config.
if bucket is None:
bucket = self._config.server.bucket
policy = dict(expiration=arrow.get(expiration).isoformat(),
conditions=[
["starts-with", "$key",
utils.join_path(bucket, path_prefix)],
{"bucket": bucket},
{"Content-Encoding": "gzip"},
])
encoded_policy = json.dumps(policy, sort_keys=True)
signature = self._sign_blob(base64.b64encode(encoded_policy))
return GCSSignedPolicyLocation.from_keywords(
session=self._session,
policy=encoded_policy,
signature=signature,
bucket=bucket,
path_prefix=path_prefix,
path_template=path_template,
GoogleAccessId=self.client_email,
expiration=expiration)
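    # Hedged example of handing out a policy based upload capability; the
    # bucket, prefix and template values below are illustrative only:
    #
    #   policy_location = service_account.create_signed_policy_location(
    #       path_prefix="uploads", path_template="{client_id}/{subpath}")
    #   policy_location.write_file("hello world")  # POSTs a gzipped form upload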
def create_signed_url_location(
self, mode="r", expiration=None, path=None, bucket=None,
upload="direct", headers=None, public=False):
"""A Factory for GCSSignedURLLocation() instances.
Args:
mode: Can be "r" for reading, "w" for writing.
expiration: When this URL should expire. By default 1 hour.
path: The path within the bucket for the object.
bucket: The bucket name.
"""
if headers is None:
headers = GCSHeaders(session=self._session)
if public:
headers.SetMember("x-goog-acl", "public-read")
elif isinstance(headers, dict):
headers = GCSHeaders.from_primitive(
headers, self._session)
if mode == "r":
method = "GET"
elif mode == "w":
method = "PUT"
if upload == "resumable":
method = "POST"
# Resumable uploads require these headers.
headers.SetMember("x-goog-resumable", "start")
else:
raise IOError("Mode not supported")
if expiration is None:
# Default 1 hour from now.
expiration = time.time() + 60 * 60
# If the bucket is not specified take it from the server's config.
if bucket is None:
bucket = self._config.server.bucket
# Build the signed string according to
# https://cloud.google.com/storage/docs/access-control/signed-urls#string-components
components = []
components.append(method) # HTTP_Verb
components.append("") # Content_MD5
components.append("") # Content_Type
components.append(str(int(expiration))) # Expiration
for k, v in sorted(headers.to_primitive(False).iteritems()):
components.append("%s:%s" % (k, v))
path = urllib.quote(path, safe="/:")
base_url = "/" + utils.join_path(bucket, path)
components.append(base_url) # Canonicalized_Resource
signature_string = "\n".join(components)
return GCSSignedURLLocation.from_keywords(
session=self._session,
signature=self._sign_blob(signature_string),
GoogleAccessId=self.client_email,
expiration=expiration,
bucket=bucket,
path=path,
method=method,
headers=headers,
upload=upload,
)
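    # For reference, the canonical string signed above has this shape for a
    # direct write (values are illustrative, see the signed URL docs linked
    # above); extension headers such as x-goog-resumable:start are inserted
    # before the resource when present:
    #
    #   PUT\n\n\n1480000000\n/<bucket>/<path>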
class GCSLocation(location.Location):
"""The location for the base of the installation on GCS."""
schema = [
dict(name="bucket",
doc="Name of the bucket"),
dict(name="upload", type="choices", default=u"direct", hidden=True,
choices=[u"direct", u"resumable"],
doc="Type of upload mechanism."),
dict(name="path",
doc="The path to the object in the bucket."),
]
def __init__(self, *args, **kwargs):
super(GCSLocation, self).__init__(*args, **kwargs)
self._cache = self._config.server.cache
def get_canonical(self, **_):
return GCSLocation.from_keywords(
session=self._session,
bucket=self.bucket,
path=self.path)
def get_requests_session(self):
requests_session = self._session.GetParameter("requests_session")
        if requests_session is None:
# To make sure we can use the requests session in the threadpool we
# need to make sure that the connection pool can block. Otherwise it
# will raise when it runs out of connections and the threads will be
# terminated.
requests_session = requests.Session()
requests_session.mount("https://", adapters.HTTPAdapter(
pool_connections=10, pool_maxsize=300, max_retries=10,
pool_block=True))
requests_session.mount("http://", adapters.HTTPAdapter(
pool_connections=10, pool_maxsize=300, max_retries=10,
pool_block=True))
self._session.SetCache("requests_session", requests_session)
return requests_session
def _get_parameters(self, **_):
"""Get request parameters.
To be overridden by derived classes.
        Returns:
          Tuple of (url_endpoint, params, headers, base_url).
"""
raise NotImplementedError()
def read_file(self, completion_routine=None, **kw):
if self._cache:
try:
local_filename = self.get_local_filename(**kw)
with open(local_filename, "rb") as fd:
return fd.read(MAX_BUFF_SIZE)
except IOError:
return ""
# We need to download the file.
url_endpoint, params, headers, _ = self._get_parameters(
**kw)
resp = self.get_requests_session().get(
url_endpoint, params=params, headers=headers)
if not resp.ok:
return self._report_error(completion_routine, resp)
return resp.content
def write_file(self, data, **kwargs):
return self.upload_file_object(StringIO.StringIO(data), **kwargs)
def _upload_direct(self, fd, completion_routine=None, **kwargs):
url_endpoint, params, headers, _ = self._get_parameters(**kwargs)
headers["Content-Encoding"] = "gzip"
resp = self.get_requests_session().put(
url_endpoint, data=GzipWrapper(self._session, fd),
params=params, headers=headers)
self._session.logging.debug("Uploaded file: %s (%s bytes)",
self.to_path(), fd.tell())
return self._report_error(completion_routine, resp)
def _upload_resumable(self, fd, completion_routine=None, **kwargs):
url_endpoint, params, headers, _ = self._get_parameters(**kwargs)
fd.seek(0, 2)
file_length = fd.tell()
fd.seek(0)
params["name"] = url_endpoint
params["uploadType"] = "resumable"
headers["x-goog-resumable"] = "start"
headers["Content-Length"] = "0"
resp = self.get_requests_session().post(
url_endpoint, params=params, headers=headers)
# The server will now tell us where to write the chunks.
try:
upload_location = resp.headers["Location"]
except KeyError:
self._session.logging.error("Unable to upload file: %s", resp.text)
return self._report_error(completion_routine, resp)
# Blocksize must be a multiple of 256kb.
BLOCK_SIZE = 256 * 1024 * 5
while 1:
offset = fd.tell()
data = fd.read(BLOCK_SIZE)
if not data:
break
headers = {
"Content-Length": str(len(data)),
"Content-Range": "bytes %d-%d/%d" % (
offset, offset + len(data) -1, file_length)
}
resp = self.get_requests_session().put(
upload_location, data=data, headers=headers)
self._session.report_progress(
"%s: Uploaded %s/%s", self.to_path(), offset, file_length)
return self._report_error(completion_routine, resp)
def upload_file_object(self, fd, completion_routine=None, **kwargs):
if self.upload == "direct":
return self._upload_direct(
fd, completion_routine=completion_routine, **kwargs)
# Resumable upload
        elif self.upload == "resumable":
            return self._upload_resumable(
                fd, completion_routine=completion_routine, **kwargs)
else:
self._report_error(completion_routine,
message="Unknown upload method")
def upload_local_file(self, local_filename=None, fd=None,
completion_routine=None, delete=True, **kwargs):
if local_filename:
fd = open(local_filename, "rb")
result = self.upload_file_object(
fd, completion_routine=completion_routine, **kwargs)
if delete and local_filename:
os.unlink(local_filename)
return result
def get_local_filename(self, completion_routine=None, **kwargs):
# We need to download the file locally.
url_endpoint, params, headers, base_url = self._get_parameters(
**kwargs)
current_generation = self._cache.get_generation(base_url)
if current_generation:
headers["If-None-Match"] = current_generation
with contextlib.closing(
self.get_requests_session().get(
url_endpoint, params=params, headers=headers,
stream=True)) as resp:
# Object not modified just return the cached object.
if resp.status_code == 304:
return self._cache.get_local_file(base_url, current_generation)
if not resp.ok:
# The file was removed from the server, make sure to expire the
# local copy too.
if resp.status_code == 404:
self._cache.expire(base_url)
return self._report_error(completion_routine, resp)
# Store the generation of this object in the cache.
current_generation = json.loads(resp.headers["ETag"])
filename = self._cache.store_at_generation(
base_url, current_generation,
iterator=resp.iter_content(chunk_size=1024*1024))
# Report success.
self._report_error(completion_routine, resp)
return filename
def list_files(self, **kwargs):
"""A generator of Location object below this one."""
raise NotImplementedError()
def to_path(self):
return utils.join_path(self.bucket, self.path)
def _report_error(self, completion_routine, response=None,
message=None):
if response:
# Only include the text in case of error.
if not response.ok:
status = location.Status(response.status_code, response.text)
else:
status = location.Status(response.status_code)
else:
status = location.Status(500, message)
        if response is None or not response.ok:
            if completion_routine:
                return completion_routine(status)
            raise IOError(response.text if response is not None else message)
else:
if completion_routine:
completion_routine(status)
return response.ok
class GCSHeaders(serializer.SerializedObject):
"""Some headers that can be set."""
schema = [
dict(name="x-goog-resumable"),
dict(name="x-goog-acl"),
dict(name="Content-Encoding"),
]
class GCSOAuth2BasedLocation(GCSLocation):
"""This location uses the regular Oauth2 based mechanism.
This only works on the server with a valid ServiceAccount credential but
allows us to use the full JSON based API.
"""
schema = [
dict(name="headers", type=GCSHeaders, hidden=True),
dict(name="generation", hidden=True),
]
def _get_parameters(self, if_modified_since=None, generation=None, **_):
"""Calculates the params for the request."""
base_url = self.to_path()
url_endpoint = ('https://storage.googleapis.com/%s' % base_url)
headers = self.headers.to_primitive(False)
headers["Authorization"] = (
"Bearer " + self._config.server.service_account.oauth_token)
headers["Cache-Control"] = "private"
if if_modified_since:
headers["If-Modified-Since"] = handlers.format_date_time(
if_modified_since)
params = {}
generation = generation or self.generation
if generation:
params["generation"] = generation
return url_endpoint, params, headers, base_url
def read_modify_write_local_file(self, modification_cb, *args):
"""Atomically modifies this location.
We first download this object to the local filesystem cache, then we
        modify it and then try to upload. If another modification occurred in
        the meantime, the callback is replayed and the upload retried.
Note that the modification_cb will be called with the filename to
modify. It may be called multiple times.
"""
url_endpoint, _, headers, base_url = self._get_parameters()
for retry in range(5):
local_file_should_be_removed = False
current_generation = None
try:
try:
local_filename = self.get_local_filename()
# The current generation in the cache.
current_generation = self._cache.get_generation(base_url)
except IOError:
# File does not exist on the server, make a tmpfile.
fd, local_filename = tempfile.mkstemp()
os.close(fd)
                    # Don't forget to remove the tempfile.
local_file_should_be_removed = True
# Now let the callback modify the file.
modification_cb(local_filename, *args)
# We may only write if this is the current generation.
if current_generation:
headers["If-Match"] = current_generation
headers["Content-Encoding"] = "gzip"
resp = self.get_requests_session().put(
url_endpoint, data=GzipWrapper(
self._session, open(local_filename, "rb")),
headers=headers)
# OK - all went well.
if resp.ok:
new_generation = json.loads(resp.headers["ETag"])
# Update the cache into a new generation.
self._cache.update_local_file_generation(
base_url, new_generation, local_filename)
# Do not remove the local file because it was moved by the
# cache.
local_file_should_be_removed = False
self._session.logging.info("Modified: %s", self.to_path())
return True
# The generation on the server has changed. Abort, wait a bit
# and retry.
if resp.status_code == 304:
time.sleep(0.1 * retry)
continue
finally:
if local_file_should_be_removed:
os.unlink(local_filename)
raise IOError("Unable to update %s" % self)
def read_modify_write(self, modification_cb, *args):
"""Atomically modify this location in a race free way.
        modification_cb will receive the current content of the file plus any
        passed args, and should return the new content of the file.
Note that modification_cb can be called several times if a lock failure
is detected.
The underlying implementation is described here:
https://cloud.google.com/storage/docs/object-versioning
"""
def cb(filename, modification_cb, *args):
with open(filename, "rb") as fd:
data = fd.read()
new_data = modification_cb(data, *args)
# Update the file.
with open(filename, "wb") as fd:
fd.write(new_data)
self.read_modify_write_local_file(cb, modification_cb, *args)
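    # Hedged usage sketch for read_modify_write(): append a line to an object
    # in a race free way. The callback may run more than once on contention.
    #
    #   def _append_line(data, line):
    #       return data + line + "\n"
    #
    #   gcs_location.read_modify_write(_append_line, "new entry")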
def stat(self, **kwargs):
"""Gets information about an object."""
url_endpoint, params, headers, _ = self._get_parameters(**kwargs)
resp = self.get_requests_session().head(
url_endpoint, params=params, headers=headers)
if resp.ok:
return location.LocationStat.from_keywords(
session=self._session,
location=self,
size=resp.headers["x-goog-stored-content-length"],
generation=resp.headers["x-goog-generation"],
created=arrow.Arrow(*(rfc822.parsedate(
resp.headers["Last-Modified"])[:7])).timestamp,
)
def delete(self, completion_routine=None, **kwargs):
"""Deletes the current location."""
url_endpoint, params, headers, _ = self._get_parameters(**kwargs)
resp = self.get_requests_session().delete(
url_endpoint, params=params, headers=headers)
return self._report_error(completion_routine, resp)
def list_files(self, completion_routine=None, paging=100,
max_results=100, **kwargs):
"""A generator of Location object below this one."""
_, params, headers, _ = self._get_parameters(**kwargs)
url_endpoint = ("https://www.googleapis.com/storage/v1/b/%s/o" %
self.bucket)
params["prefix"] = utils.join_path(self.path)
params["maxResults"] = paging
count = 0
while count < max_results:
resp = self.get_requests_session().get(
url_endpoint, params=params, headers=headers)
if not resp.ok:
self._report_error(completion_routine, resp)
return
data = json.loads(resp.text)
items = data.get("items", [])
for item in items:
sublocation = self.copy()
sublocation.path = item["name"]
sublocation.generation = item["generation"]
count += 1
yield location.LocationStat.from_keywords(
session=self._session,
location=sublocation,
size=item["size"],
generation=item["generation"],
created=arrow.get(item["timeCreated"]).timestamp,
updated=arrow.get(item["updated"]).timestamp)
next_page_token = data.get("nextPageToken")
if not next_page_token or not items:
break
params["pageToken"] = next_page_token
class GCSUnauthenticatedLocation(GCSLocation):
"""A read only, unauthenticated location."""
def _get_parameters(self, if_modified_since=None):
base_url = self.to_path()
url_endpoint = ('https://storage.googleapis.com/%s' %
base_url.lstrip("/"))
headers = {"Cache-Control": "private"}
if if_modified_since:
headers["If-Modified-Since"] = handlers.format_date_time(
if_modified_since)
return url_endpoint, {}, headers, base_url
def read_file(self, **kw):
url_endpoint, _, headers, _ = self._get_parameters(**kw)
resp = self.get_requests_session().get(
url_endpoint, headers=headers)
if resp.ok:
return resp.content
return ""
class GCSSignedURLLocation(GCSLocation):
"""A Location object which can be used to access a signed URL."""
schema = [
dict(name="method", type="choices", default="GET", hidden=True,
choices=["GET", "POST", "PUT"]),
dict(name="signature", type="str", hidden=True,
doc="The signature to use when accessing the resource."),
dict(name="GoogleAccessId", hidden=True,
doc="The email form of the service account id"),
dict(name="expiration", type="int",
doc="When the url expires."),
dict(name="headers", type=GCSHeaders, hidden=True),
]
def _get_parameters(self):
"""Calculates the params for the request."""
base_url = self.to_path()
url_endpoint = ('https://storage.googleapis.com/%s' %
base_url.lstrip("/"))
params = dict(GoogleAccessId=self.GoogleAccessId,
Expires="%d" % self.expiration,
Signature=base64.b64encode(self.signature))
headers = self.headers.to_primitive(False)
return url_endpoint, params, headers, base_url
def read_file(self, **kwargs):
if self.method != "GET":
raise IOError("GCSSignedURLLocation is not created for reading.")
return super(GCSSignedURLLocation, self).read_file(**kwargs)
def write_file(self, data, **kwargs):
if self.method != "PUT":
raise IOError("GCSSignedURLLocation is not created for writing.")
return super(GCSSignedURLLocation, self).write_file(data, **kwargs)
def get_local_filename(self, completion_routine=None, **kwargs):
if self.method != "GET":
raise IOError("Unable to read file. This location is "
"only opened for Writing.")
return super(GCSSignedURLLocation, self).get_local_filename(
completion_routine=completion_routine, **kwargs)
class GzipWrapper(object):
"""Wrap an fd to produce a compressed stream from it."""
BUFFER_SIZE = 1024 * 1024
def __init__(self, session, infd):
self.session = session
self.total_read = 0
self.infd = infd
self.buff = ""
self.zipper = gzip.GzipFile(mode="wb", fileobj=self)
def write(self, data):
"""This function is called by the GzipFile writer."""
self.buff += data
def read(self, length=10000000000):
"""This is called by readers if this wrapper."""
# Read infd until we have length available in self.buff.
while self.zipper and len(self.buff) < length:
data = self.infd.read(self.BUFFER_SIZE)
if not data and self.zipper:
# infd is finished.
self.zipper.flush()
self.zipper.close()
self.zipper = None
break
self.total_read += len(data)
self.session.report_progress("Read %s bytes", self.total_read)
self.zipper.write(data)
result, self.buff = self.buff[:length], self.buff[length:]
return result
def flush(self):
pass
def __iter__(self):
while 1:
data = self.read(self.BUFFER_SIZE)
if not data:
break
yield data
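# Hedged usage sketch for GzipWrapper: stream a gzip compressed copy of a
# local file through an HTTP PUT without buffering it all in memory (this is
# what _upload_direct() above does). The URL and session are placeholders.
#
#   with open("/tmp/report.bin", "rb") as fd:
#       requests.put("https://example.com/upload",
#                    data=GzipWrapper(session, fd))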
class GCSSignedPolicyLocation(GCSLocation):
"""A Location object which uses a policy to access a URL."""
schema = [
dict(name="policy", type="str", hidden=True,
doc="The policy document."),
dict(name="signature", type="str", hidden=True,
doc="The signature to use when accessing the resource."),
dict(name="path_prefix",
doc="Access is allowed to all paths starting with this prefix."),
dict(name="path_template",
doc="A template from which to expand the complete path."),
dict(name="GoogleAccessId", hidden=True,
doc="The email form of the service account id"),
dict(name="expiration", type="int",
doc="When the url expires."),
dict(name="headers", type=GCSHeaders, hidden=True),
]
def expand_path(self, subpath="", **kwargs):
"""Expand the complete path using the client's config."""
kwargs["client_id"] = self._config.client.writeback.client_id
kwargs["nonce"] = self._config.client.nonce
kwargs["subpath"] = subpath
return self.path_template.format(**kwargs)
def get_canonical(self, **kwargs):
return GCSLocation.from_keywords(
session=self._session,
bucket=self.bucket,
path=utils.join_path(self.path_prefix, self.expand_path(**kwargs))
)
def _get_parameters(self, **kwargs):
"""Calculates the params for the request."""
subpath = self.expand_path(**kwargs)
key = utils.join_path(self.bucket, self.path_prefix, subpath)
url_endpoint = "https://storage.googleapis.com/"
params = dict(GoogleAccessId=self.GoogleAccessId,
Signature=base64.b64encode(self.signature),
Policy=base64.b64encode(self.policy),
bucket=self.bucket,
key=key)
params["content-encoding"] = "gzip"
headers = {"content-encoding": "gzip"}
return url_endpoint, params, headers, key
def upload_file_object(self, fd, completion_routine=None, **kwargs):
url_endpoint, params, headers, base_url = self._get_parameters(**kwargs)
resp = self.get_requests_session().post(
url_endpoint, params,
files=dict(file=GzipWrapper(self._session, fd)),
headers=headers)
self._session.logging.debug(
"Uploaded file: %s (%s bytes)", base_url, fd.tell())
return self._report_error(completion_routine, resp)
|
hankcs/HanLP
|
refs/heads/master
|
hanlp/transform/tsv.py
|
1
|
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2019-06-13 21:15
import functools
from abc import ABC
from typing import Tuple, Union, Optional, Iterable, List
import tensorflow as tf
from hanlp_common.structure import SerializableDict
from hanlp.common.transform_tf import Transform
from hanlp.common.vocab_tf import VocabTF
from hanlp.utils.io_util import generate_words_tags_from_tsv
from hanlp.utils.tf_util import str_tensor_to_str
from hanlp_common.util import merge_locals_kwargs
def dataset_from_tsv(tsv_file_path, word_vocab: VocabTF, char_vocab: VocabTF, tag_vocab: VocabTF, batch_size=32,
shuffle=None, repeat=None, prefetch=1, lower=False, **kwargs):
generator = functools.partial(generate_words_tags_from_tsv, tsv_file_path, word_vocab, char_vocab, tag_vocab, lower)
return dataset_from_generator(generator, word_vocab, tag_vocab, batch_size, shuffle, repeat, prefetch,
**kwargs)
def dataset_from_generator(generator, word_vocab, tag_vocab, batch_size=32, shuffle=None, repeat=None, prefetch=1,
**kwargs):
shapes = [None], [None]
types = tf.string, tf.string
defaults = word_vocab.pad_token, tag_vocab.pad_token if tag_vocab.pad_token else tag_vocab.first_token
dataset = tf.data.Dataset.from_generator(generator, output_shapes=shapes, output_types=types)
if shuffle:
if isinstance(shuffle, bool):
shuffle = 1024
dataset = dataset.shuffle(shuffle)
if repeat:
dataset = dataset.repeat(repeat)
dataset = dataset.padded_batch(batch_size, shapes, defaults).prefetch(prefetch)
return dataset
def vocab_from_tsv(tsv_file_path, lower=False, lock_word_vocab=False, lock_char_vocab=True, lock_tag_vocab=True) \
-> Tuple[VocabTF, VocabTF, VocabTF]:
word_vocab = VocabTF()
char_vocab = VocabTF()
tag_vocab = VocabTF(unk_token=None)
with open(tsv_file_path, encoding='utf-8') as tsv_file:
for line in tsv_file:
cells = line.strip().split()
if cells:
word, tag = cells
if lower:
word_vocab.add(word.lower())
else:
word_vocab.add(word)
char_vocab.update(list(word))
tag_vocab.add(tag)
if lock_word_vocab:
word_vocab.lock()
if lock_char_vocab:
char_vocab.lock()
if lock_tag_vocab:
tag_vocab.lock()
return word_vocab, char_vocab, tag_vocab
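# For reference, vocab_from_tsv() expects a two column, whitespace separated
# file with one token and one tag per line; sentence boundaries are typically
# marked by blank lines (illustrative sample):
#
#   I       PRP
#   love    VBP
#   HanLP   NNP
#
#   It      PRP
#   works   VBZ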
class TsvTaggingFormat(Transform, ABC):
def file_to_inputs(self, filepath: str, gold=True):
assert gold, 'TsvTaggingFormat does not support reading non-gold files'
yield from generate_words_tags_from_tsv(filepath, gold=gold, lower=self.config.get('lower', False),
max_seq_length=self.max_seq_length)
@property
def max_seq_length(self):
return self.config.get('max_seq_length', None)
class TSVTaggingTransform(TsvTaggingFormat, Transform):
def __init__(self, config: SerializableDict = None, map_x=True, map_y=True, use_char=False, **kwargs) -> None:
super().__init__(**merge_locals_kwargs(locals(), kwargs))
self.word_vocab: Optional[VocabTF] = None
self.tag_vocab: Optional[VocabTF] = None
self.char_vocab: Optional[VocabTF] = None
def fit(self, trn_path: str, **kwargs) -> int:
self.word_vocab = VocabTF()
self.tag_vocab = VocabTF(pad_token=None, unk_token=None)
num_samples = 0
for words, tags in self.file_to_inputs(trn_path, True):
self.word_vocab.update(words)
self.tag_vocab.update(tags)
num_samples += 1
if self.char_vocab:
self.char_vocab = VocabTF()
for word in self.word_vocab.token_to_idx.keys():
if word in (self.word_vocab.pad_token, self.word_vocab.unk_token):
continue
self.char_vocab.update(list(word))
return num_samples
def create_types_shapes_values(self) -> Tuple[Tuple, Tuple, Tuple]:
types = tf.string, tf.string
shapes = [None], [None]
values = self.word_vocab.pad_token, self.tag_vocab.first_token
return types, shapes, values
def inputs_to_samples(self, inputs, gold=False):
lower = self.config.get('lower', False)
if gold:
if lower:
for x, y in inputs:
yield x.lower(), y
else:
yield from inputs
else:
for x in inputs:
yield x.lower() if lower else x, [self.padding_values[-1]] * len(x)
def x_to_idx(self, x) -> Union[tf.Tensor, Tuple]:
return self.word_vocab.lookup(x)
def y_to_idx(self, y) -> tf.Tensor:
return self.tag_vocab.lookup(y)
def X_to_inputs(self, X: Union[tf.Tensor, Tuple[tf.Tensor]]) -> Iterable:
for xs in X:
words = []
for x in xs:
words.append(str_tensor_to_str(x) if self.char_vocab else self.word_vocab.idx_to_token[int(x)])
yield words
def Y_to_outputs(self, Y: Union[tf.Tensor, Tuple[tf.Tensor]], gold=False,
inputs=None, X=None, **kwargs) -> Iterable:
if not gold:
Y = tf.argmax(Y, axis=2)
for ys, xs in zip(Y, inputs):
tags = []
for y, x in zip(ys, xs):
tags.append(self.tag_vocab.idx_to_token[int(y)])
yield tags
def input_is_single_sample(self, input: Union[List[str], List[List[str]]]) -> bool:
return isinstance(input[0], str)
def input_truth_output_to_str(self, input: List[str], truth: List[str], output: List[str]):
text = ''
for word, gold_tag, pred_tag in zip(input, truth, output):
text += ' '.join([word, gold_tag, pred_tag]) + '\n'
text += '\n'
return text
|
machtfit/django-oscar
|
refs/heads/machtfit
|
src/oscar/views/generic.py
|
1
|
from django import forms
from django.core import validators
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
import phonenumbers
from oscar.core.phonenumber import PhoneNumber
class PhoneNumberMixin(object):
"""
Validation mixin for forms with a phone number, and optionally a country.
    It tries to validate the phone number, and on failure retries using the
    provided country as a hint, treating it as a local number.
"""
phone_number = forms.CharField(max_length=32, required=False)
def get_country(self):
# If the form data contains valid country information, we use that.
if hasattr(self, 'cleaned_data') and 'country' in self.cleaned_data:
return self.cleaned_data['country']
# Oscar hides the field if there's only one country. Then (and only
# then!) can we consider a country on the model instance.
elif 'country' not in self.fields and hasattr(
self.instance, 'country'):
return self.instance.country
def get_region_code(self, country):
return country.iso_3166_1_a2
def clean_phone_number(self):
number = self.cleaned_data['phone_number']
# empty
if number in validators.EMPTY_VALUES:
return None
# Check for an international phone format
try:
phone_number = PhoneNumber.from_string(number)
except phonenumbers.NumberParseException:
# Try hinting with the shipping country
country = self.get_country()
region_code = self.get_region_code(country)
if not region_code:
# There is no shipping country, not a valid international
# number
raise ValidationError(
_(u'This is not a valid international phone format.'))
# The PhoneNumber class does not allow specifying
# the region. So we drop down to the underlying phonenumbers
# library, which luckily allows parsing into a PhoneNumber
# instance
try:
phone_number = PhoneNumber.from_string(
number, region=region_code)
if not phone_number.is_valid():
raise ValidationError(
_(u'This is not a valid local phone format for %s.')
% country)
except phonenumbers.NumberParseException:
# Not a valid local or international phone number
raise ValidationError(
_(u'This is not a valid local or international phone'
u' format.'))
return phone_number
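# A hedged usage sketch (class and model names are illustrative): mixing
# PhoneNumberMixin into a model form so clean_phone_number() runs as part of
# the normal form validation cycle.
#
#   class ShippingAddressForm(PhoneNumberMixin, forms.ModelForm):
#       class Meta:
#           model = ShippingAddress
#           fields = ['country', 'phone_number']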
|
sirkubax/ansible-modules-extras
|
refs/heads/devel
|
network/f5/bigip_node.py
|
77
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Matt Hite <mhite@hotmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: bigip_node
short_description: "Manages F5 BIG-IP LTM nodes"
description:
- "Manages F5 BIG-IP LTM nodes via iControl SOAP API"
version_added: "1.4"
author: "Matt Hite (@mhite)"
notes:
- "Requires BIG-IP software version >= 11"
- "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
- "Best run as a local_action in your playbook"
requirements:
- bigsuds
options:
server:
description:
- BIG-IP host
required: true
default: null
choices: []
aliases: []
user:
description:
- BIG-IP username
required: true
default: null
choices: []
aliases: []
password:
description:
- BIG-IP password
required: true
default: null
choices: []
aliases: []
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: 2.0
state:
description:
- Pool member state
required: true
default: present
choices: ['present', 'absent']
aliases: []
session_state:
description:
- Set new session availability status for node
version_added: "1.9"
required: false
default: null
choices: ['enabled', 'disabled']
aliases: []
monitor_state:
description:
- Set monitor availability status for node
version_added: "1.9"
required: false
default: null
choices: ['enabled', 'disabled']
aliases: []
partition:
description:
- Partition
required: false
default: 'Common'
choices: []
aliases: []
name:
description:
- "Node name"
    required: true
default: null
choices: []
host:
description:
- "Node IP. Required when state=present and node does not exist. Error when state=absent."
    required: false
default: null
choices: []
aliases: ['address', 'ip']
description:
description:
- "Node description."
required: false
default: null
choices: []
'''
EXAMPLES = '''
## playbook task examples:
---
# file bigip-test.yml
# ...
- hosts: bigip-test
tasks:
- name: Add node
local_action: >
bigip_node
server=lb.mydomain.com
user=admin
password=mysecret
state=present
partition=matthite
host="{{ ansible_default_ipv4["address"] }}"
name="{{ ansible_default_ipv4["address"] }}"
# Note that the BIG-IP automatically names the node using the
# IP address specified in previous play's host parameter.
# Future plays referencing this node no longer use the host
# parameter but instead use the name parameter.
# Alternatively, you could have specified a name with the
# name parameter when state=present.
- name: Modify node description
local_action: >
bigip_node
server=lb.mydomain.com
user=admin
password=mysecret
state=present
partition=matthite
name="{{ ansible_default_ipv4["address"] }}"
description="Our best server yet"
- name: Delete node
local_action: >
bigip_node
server=lb.mydomain.com
user=admin
password=mysecret
state=absent
partition=matthite
name="{{ ansible_default_ipv4["address"] }}"
# The BIG-IP GUI doesn't map directly to the API calls for "Node ->
# General Properties -> State". The following states map to API monitor
# and session states.
#
# Enabled (all traffic allowed):
# monitor_state=enabled, session_state=enabled
# Disabled (only persistent or active connections allowed):
# monitor_state=enabled, session_state=disabled
# Forced offline (only active connections allowed):
# monitor_state=disabled, session_state=disabled
#
# See https://devcentral.f5.com/questions/icontrol-equivalent-call-for-b-node-down
- name: Force node offline
local_action: >
bigip_node
server=lb.mydomain.com
user=admin
password=mysecret
state=present
session_state=disabled
monitor_state=disabled
partition=matthite
name="{{ ansible_default_ipv4["address"] }}"
'''
def node_exists(api, address):
# hack to determine if node exists
result = False
try:
api.LocalLB.NodeAddressV2.get_object_status(nodes=[address])
result = True
except bigsuds.OperationFailed, e:
if "was not found" in str(e):
result = False
else:
# genuine exception
raise
return result
def create_node_address(api, address, name):
try:
api.LocalLB.NodeAddressV2.create(nodes=[name], addresses=[address], limits=[0])
result = True
desc = ""
except bigsuds.OperationFailed, e:
if "already exists" in str(e):
result = False
desc = "referenced name or IP already in use"
else:
# genuine exception
raise
return (result, desc)
def get_node_address(api, name):
return api.LocalLB.NodeAddressV2.get_address(nodes=[name])[0]
def delete_node_address(api, address):
try:
api.LocalLB.NodeAddressV2.delete_node_address(nodes=[address])
result = True
desc = ""
except bigsuds.OperationFailed, e:
if "is referenced by a member of pool" in str(e):
result = False
desc = "node referenced by pool"
else:
# genuine exception
raise
return (result, desc)
def set_node_description(api, name, description):
api.LocalLB.NodeAddressV2.set_description(nodes=[name],
descriptions=[description])
def get_node_description(api, name):
return api.LocalLB.NodeAddressV2.get_description(nodes=[name])[0]
def set_node_session_enabled_state(api, name, session_state):
session_state = "STATE_%s" % session_state.strip().upper()
api.LocalLB.NodeAddressV2.set_session_enabled_state(nodes=[name],
states=[session_state])
def get_node_session_status(api, name):
result = api.LocalLB.NodeAddressV2.get_session_status(nodes=[name])[0]
result = result.split("SESSION_STATUS_")[-1].lower()
return result
def set_node_monitor_state(api, name, monitor_state):
monitor_state = "STATE_%s" % monitor_state.strip().upper()
api.LocalLB.NodeAddressV2.set_monitor_state(nodes=[name],
states=[monitor_state])
def get_node_monitor_status(api, name):
result = api.LocalLB.NodeAddressV2.get_monitor_status(nodes=[name])[0]
result = result.split("MONITOR_STATUS_")[-1].lower()
return result
def main():
    argument_spec = f5_argument_spec()
argument_spec.update(dict(
session_state = dict(type='str', choices=['enabled', 'disabled']),
monitor_state = dict(type='str', choices=['enabled', 'disabled']),
name = dict(type='str', required=True),
host = dict(type='str', aliases=['address', 'ip']),
description = dict(type='str')
)
)
module = AnsibleModule(
argument_spec = argument_spec,
supports_check_mode=True
)
(server,user,password,state,partition,validate_certs) = f5_parse_arguments(module)
session_state = module.params['session_state']
monitor_state = module.params['monitor_state']
host = module.params['host']
name = module.params['name']
address = fq_name(partition, name)
description = module.params['description']
if state == 'absent' and host is not None:
module.fail_json(msg="host parameter invalid when state=absent")
try:
api = bigip_api(server, user, password)
result = {'changed': False} # default
if state == 'absent':
if node_exists(api, address):
if not module.check_mode:
deleted, desc = delete_node_address(api, address)
if not deleted:
module.fail_json(msg="unable to delete: %s" % desc)
else:
result = {'changed': True}
else:
# check-mode return value
result = {'changed': True}
elif state == 'present':
if not node_exists(api, address):
if host is None:
module.fail_json(msg="host parameter required when " \
"state=present and node does not exist")
if not module.check_mode:
created, desc = create_node_address(api, address=host, name=address)
if not created:
module.fail_json(msg="unable to create: %s" % desc)
else:
result = {'changed': True}
if session_state is not None:
set_node_session_enabled_state(api, address,
session_state)
result = {'changed': True}
if monitor_state is not None:
set_node_monitor_state(api, address, monitor_state)
result = {'changed': True}
if description is not None:
set_node_description(api, address, description)
result = {'changed': True}
else:
# check-mode return value
result = {'changed': True}
else:
# node exists -- potentially modify attributes
if host is not None:
if get_node_address(api, address) != host:
module.fail_json(msg="Changing the node address is " \
"not supported by the API; " \
"delete and recreate the node.")
if session_state is not None:
session_status = get_node_session_status(api, address)
if session_state == 'enabled' and \
session_status == 'forced_disabled':
if not module.check_mode:
set_node_session_enabled_state(api, address,
session_state)
result = {'changed': True}
elif session_state == 'disabled' and \
                    session_status != 'forced_disabled':
if not module.check_mode:
set_node_session_enabled_state(api, address,
session_state)
result = {'changed': True}
if monitor_state is not None:
monitor_status = get_node_monitor_status(api, address)
if monitor_state == 'enabled' and \
monitor_status == 'forced_down':
if not module.check_mode:
set_node_monitor_state(api, address,
monitor_state)
result = {'changed': True}
elif monitor_state == 'disabled' and \
monitor_status != 'forced_down':
if not module.check_mode:
set_node_monitor_state(api, address,
monitor_state)
result = {'changed': True}
if description is not None:
if get_node_description(api, address) != description:
if not module.check_mode:
set_node_description(api, address, description)
result = {'changed': True}
except Exception, e:
module.fail_json(msg="received exception: %s" % e)
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.f5 import *
main()
|
ioannistsanaktsidis/inspire-next
|
refs/heads/master
|
inspire/base/format_elements/bfe_inspire_abstract.py
|
2
|
# -*- coding: utf-8 -*-
##
## This file is part of INSPIRE.
## Copyright (C) 2015 CERN.
##
## INSPIRE is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## INSPIRE is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with INSPIRE; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
#
"""BibFormat element - Prints English abstract.
"""
def format_element(bfo, prefix_en, suffix_en, escape="0", separator_en="<br/>"):
""" Print the abstract of a record in HTML in English.
@param prefix_en: a prefix for english abstract (printed only if english abstract exists)
    @param suffix_en: a suffix for english abstract (printed only if english abstract exists)
@param escape: escaping method (overrides default escape parameter to not escape separators)
@param separator_en: a separator between each english abstract
"""
out = ''
# arXiv abstract should be last, so let's store it in a special variable
arxiv = ''
toggle_script = """
<script type="text/javascript">
function toggle_abstract(event, element) {
$(element).next().next().toggle();
event.preventDefault();
};
</script>
"""
def create_abstract_element(field, hide=False):
element = ""
if field.get('a'):
source = field.get('9')
if hide:
element += '<a href="#" onclick="toggle_abstract(event, this)">' + prefix_en
if source:
element += '(' + source + ')'
element += '</a>'
element += "<br/>"
element += "<span style='display: none;'>" + field.get('a')
element += separator_en + '</span>'
else:
element += prefix_en
if source:
element += '(' + source + ')'
element += "<br/>"
element += "<span>" + field.get('a')
element += separator_en + '</span>'
return element
try:
escape_mode_int = int(escape)
except ValueError:
escape_mode_int = 0
abstract_list = bfo.fields('520__', escape=escape_mode_int)
other_abstract = []
for abstract in abstract_list:
if abstract.get('9', "").lower() == 'arxiv':
# there should be only one arXiv abstract, so we can overwrite the arxiv variable
arxiv = abstract
elif abstract.get('a'):
other_abstract.append(abstract)
if other_abstract:
out = create_abstract_element(other_abstract[0], hide=False)
for abstract in other_abstract[1:]:
out += create_abstract_element(abstract, hide=True)
if arxiv:
out += create_abstract_element(arxiv, hide=True)
else:
if arxiv:
out = create_abstract_element(arxiv, hide=False)
if out:
out += suffix_en
out += toggle_script
return out
def escape_values(bfo):
"""
Check if output of this element should be escaped.
Called by BibFormat in order to check if output of this element
should be escaped.
"""
return 0
|
Johnetordoff/osf.io
|
refs/heads/develop
|
osf/migrations/0167_auto_20190506_1556.py
|
10
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from osf import features
from osf.utils.migrations import AddWaffleFlags
class Migration(migrations.Migration):
dependencies = [
('osf', '0166_merge_20190429_1632'),
]
operations = [
AddWaffleFlags([features.OSF_GROUPS], on_for_everyone=False),
]
|
jbowes/ansible-modules-extras
|
refs/heads/devel
|
monitoring/logentries.py
|
153
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Ivan Vanderbyl <ivan@app.io>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: logentries
author: "Ivan Vanderbyl (@ivanvanderbyl)"
short_description: Module for tracking logs via logentries.com
description:
- Sends logs to LogEntries in realtime
version_added: "1.6"
options:
path:
description:
- path to a log file
required: true
state:
description:
- following state of the log
choices: [ 'present', 'absent' ]
required: false
default: present
name:
description:
- name of the log
required: false
logtype:
description:
- type of the log
required: false
notes:
- Requires the LogEntries agent which can be installed following the instructions at logentries.com
'''
EXAMPLES = '''
- logentries: path=/var/log/nginx/access.log state=present name=nginx-access-log
- logentries: path=/var/log/nginx/error.log state=absent
'''
def query_log_status(module, le_path, path, state="present"):
""" Returns whether a log is followed or not. """
if state == "present":
rc, out, err = module.run_command("%s followed %s" % (le_path, path))
if rc == 0:
return True
return False
def follow_log(module, le_path, logs, name=None, logtype=None):
""" Follows one or more logs if not already followed. """
followed_count = 0
for log in logs:
if query_log_status(module, le_path, log):
continue
if module.check_mode:
module.exit_json(changed=True)
cmd = [le_path, 'follow', log]
if name:
cmd.extend(['--name',name])
if logtype:
cmd.extend(['--type',logtype])
rc, out, err = module.run_command(' '.join(cmd))
if not query_log_status(module, le_path, log):
module.fail_json(msg="failed to follow '%s': %s" % (log, err.strip()))
followed_count += 1
if followed_count > 0:
module.exit_json(changed=True, msg="followed %d log(s)" % (followed_count,))
module.exit_json(changed=False, msg="logs(s) already followed")
def unfollow_log(module, le_path, logs):
""" Unfollows one or more logs if followed. """
removed_count = 0
    # Using a for loop so that in case of an error we can report the log that failed
for log in logs:
# Query the log first, to see if we even need to remove.
if not query_log_status(module, le_path, log):
continue
if module.check_mode:
module.exit_json(changed=True)
rc, out, err = module.run_command([le_path, 'rm', log])
if query_log_status(module, le_path, log):
module.fail_json(msg="failed to remove '%s': %s" % (log, err.strip()))
removed_count += 1
if removed_count > 0:
        module.exit_json(changed=True, msg="removed %d log(s)" % removed_count)
    module.exit_json(changed=False, msg="log(s) already unfollowed")
def main():
module = AnsibleModule(
argument_spec = dict(
path = dict(required=True),
state = dict(default="present", choices=["present", "followed", "absent", "unfollowed"]),
name = dict(required=False, default=None, type='str'),
logtype = dict(required=False, default=None, type='str', aliases=['type'])
),
supports_check_mode=True
)
le_path = module.get_bin_path('le', True, ['/usr/local/bin'])
p = module.params
# Handle multiple log files
logs = p["path"].split(",")
logs = filter(None, logs)
if p["state"] in ["present", "followed"]:
follow_log(module, le_path, logs, name=p['name'], logtype=p['logtype'])
elif p["state"] in ["absent", "unfollowed"]:
unfollow_log(module, le_path, logs)
# import module snippets
from ansible.module_utils.basic import *
main()
|
joopert/home-assistant
|
refs/heads/dev
|
tests/helpers/test_deprecation.py
|
4
|
"""Test deprecation helpers."""
from homeassistant.helpers.deprecation import deprecated_substitute, get_deprecated
from unittest.mock import patch, MagicMock
class MockBaseClass:
"""Mock base class for deprecated testing."""
@property
@deprecated_substitute("old_property")
def new_property(self):
"""Test property to fetch."""
raise NotImplementedError()
class MockDeprecatedClass(MockBaseClass):
"""Mock deprecated class object."""
@property
def old_property(self):
"""Test property to fetch."""
return True
class MockUpdatedClass(MockBaseClass):
"""Mock updated class object."""
@property
def new_property(self):
"""Test property to fetch."""
return True
@patch("logging.getLogger")
def test_deprecated_substitute_old_class(mock_get_logger):
"""Test deprecated class object."""
mock_logger = MagicMock()
mock_get_logger.return_value = mock_logger
mock_object = MockDeprecatedClass()
assert mock_object.new_property is True
assert mock_object.new_property is True
assert mock_logger.warning.called
assert len(mock_logger.warning.mock_calls) == 1
@patch("logging.getLogger")
def test_deprecated_substitute_new_class(mock_get_logger):
"""Test deprecated class object."""
mock_logger = MagicMock()
mock_get_logger.return_value = mock_logger
mock_object = MockUpdatedClass()
assert mock_object.new_property is True
assert mock_object.new_property is True
assert not mock_logger.warning.called
@patch("logging.getLogger")
def test_config_get_deprecated_old(mock_get_logger):
"""Test deprecated class object."""
mock_logger = MagicMock()
mock_get_logger.return_value = mock_logger
config = {"old_name": True}
assert get_deprecated(config, "new_name", "old_name") is True
assert mock_logger.warning.called
assert len(mock_logger.warning.mock_calls) == 1
@patch("logging.getLogger")
def test_config_get_deprecated_new(mock_get_logger):
"""Test deprecated class object."""
mock_logger = MagicMock()
mock_get_logger.return_value = mock_logger
config = {"new_name": True}
assert get_deprecated(config, "new_name", "old_name") is True
assert not mock_logger.warning.called
|
isandlaTech/cohorte-demos
|
refs/heads/dev
|
led/dump/led-demo-raspberry/cohorte/dist/cohorte-1.0.0-20141209.234423-41-python-distribution/repo/pelix/utilities.py
|
4
|
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Utility methods and decorators
:author: Thomas Calmant
:copyright: Copyright 2014, isandlaTech
:license: Apache License 2.0
:version: 0.5.8
:status: Beta
..
This file is part of iPOPO.
iPOPO is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
iPOPO is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with iPOPO. If not, see <http://www.gnu.org/licenses/>.
"""
# Pelix constants
import pelix.constants
# Standard library
import collections
import contextlib
import functools
import logging
import sys
import threading
import traceback
# ------------------------------------------------------------------------------
# Module version
__version_info__ = (0, 5, 8)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# Using Python 3
PYTHON_3 = (sys.version_info[0] == 3)
# ------------------------------------------------------------------------------
@contextlib.contextmanager
def use_service(bundle_context, svc_reference):
"""
Utility context to safely use a service in a "with" block.
    It retrieves the given service and releases its reference when
exiting the context.
:param bundle_context: The calling bundle context
:param svc_reference: The reference of the service to use
:return: The requested service
:raise BundleException: Service not found
:raise TypeError: Invalid service reference
"""
try:
# Give the service
yield bundle_context.get_service(svc_reference)
finally:
try:
# Release it
bundle_context.unget_service(svc_reference)
except pelix.constants.BundleException:
# Service might have already been unregistered
pass
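# Hedged usage sketch: get_service_reference() is the usual Pelix way to
# obtain a reference; the service specification string is illustrative.
#
#   reference = bundle_context.get_service_reference("sample.spell_checker")
#   with use_service(bundle_context, reference) as svc:
#       svc.check("some text")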
# ------------------------------------------------------------------------------
class Deprecated(object):
"""
Prints a warning when using the decorated method
"""
def __init__(self, message=None, logger=None):
"""
Sets the deprecation message, e.g. to indicate which method to call
instead.
If a logger is given, its 'warning' method will be called to print the
message; else the standard 'print' method will be used.
:param message: Message to be printed
:param logger: The name of the logger to use, or None.
"""
self.__message = message or "Deprecated method"
self.__logger = logger or None
self.__already_logged = False
def __log(self, method_name):
"""
Logs the deprecation message on first call, does nothing after
:param method_name: Name of the deprecated method
"""
if not self.__already_logged:
# Print only if not already done
stack = '\n\t'.join(traceback.format_stack())
logging.getLogger(self.__logger).warning("%s: %s\n%s",
method_name,
self.__message,
stack)
self.__already_logged = True
def __call__(self, method):
"""
Applies the modifications
:param method: The decorated method
:return: The wrapped method
"""
# Prepare the wrapped call
@functools.wraps(method)
def wrapped(*args, **kwargs):
"""
Wrapped deprecated method
"""
self.__log(method.__name__)
return method(*args, **kwargs)
return wrapped
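# Hedged usage sketch for the Deprecated decorator (method names illustrative):
#
#   class EventAdmin(object):
#       @Deprecated("Use post_event() instead")
#       def send_event(self, event):
#           return self.post_event(event)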
# ------------------------------------------------------------------------------
class Synchronized(object):
"""
A synchronizer for global methods
"""
def __init__(self, lock=None):
"""
Sets up the decorator. If 'lock' is None, an RLock() is created for
this decorator.
:param lock: The lock to be used for synchronization (can be None)
"""
if not is_lock(lock):
self.__lock = threading.RLock()
else:
self.__lock = lock
def __call__(self, method):
"""
Sets up the decorated method
:param method: The decorated method
:return: The wrapped method
"""
@functools.wraps(method)
def wrapped(*args, **kwargs):
"""
The wrapping method
"""
with self.__lock:
return method(*args, **kwargs)
return wrapped
def SynchronizedClassMethod(*locks_attr_names, **kwargs):
"""
A synchronizer decorator for class methods. An AttributeError can be raised
at runtime if the given lock attribute doesn't exist or if it is None.
If a parameter ``sorted`` is found in ``kwargs`` and its value is True,
then the list of locks names will be sorted before locking.
:param locks_attr_names: A list of the lock(s) attribute(s) name(s) to be
used for synchronization
:return: The decorator method, surrounded with the lock
"""
# Filter the names (remove empty ones)
locks_attr_names = [lock_name
for lock_name in locks_attr_names
if lock_name]
if not locks_attr_names:
raise ValueError("The lock names list can't be empty")
if 'sorted' not in kwargs or kwargs['sorted']:
# Sort the lock names if requested
        # (locking always in the same order reduces the risk of deadlock)
locks_attr_names = list(locks_attr_names)
locks_attr_names.sort()
def wrapped(method):
"""
The wrapping method
:param method: The wrapped method
:return: The wrapped method
:raise AttributeError: The given attribute name doesn't exist
"""
@functools.wraps(method)
def synchronized(self, *args, **kwargs):
"""
Calls the wrapped method with a lock
"""
# Raises an AttributeError if needed
locks = [getattr(self, attr_name)
for attr_name in locks_attr_names]
locked = collections.deque()
i = 0
try:
# Lock
for lock in locks:
if lock is None:
# No lock...
raise AttributeError(
"Lock '{0}' can't be None in class {1}"
.format(locks_attr_names[i], type(self).__name__))
# Get the lock
i += 1
lock.acquire()
locked.appendleft(lock)
# Use the method
return method(self, *args, **kwargs)
finally:
# Unlock what has been locked in all cases
for lock in locked:
lock.release()
locked.clear()
del locks[:]
return synchronized
# Return the wrapped method
return wrapped
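# Hedged usage sketch for SynchronizedClassMethod (attribute name illustrative):
#
#   class Registry(object):
#       def __init__(self):
#           self._lock = threading.RLock()
#           self._items = []
#
#       @SynchronizedClassMethod('_lock')
#       def register(self, item):
#           self._items.append(item)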
def is_lock(lock):
"""
Tests if the given lock is an instance of a lock class
"""
if lock is None:
# Don't do useless tests
return False
for attr in ('acquire', 'release', '__enter__', '__exit__'):
if not hasattr(lock, attr):
# Missing something
return False
# Same API as a lock
return True
# ------------------------------------------------------------------------------
def read_only_property(value):
"""
Makes a read-only property that always returns the given value
"""
return property(lambda cls: value)
# ------------------------------------------------------------------------------
def remove_all_occurrences(sequence, item):
"""
Removes all occurrences of item in the given sequence
:param sequence: The items list
:param item: The item to be removed
"""
if sequence is None:
return
while item in sequence:
sequence.remove(item)
# ------------------------------------------------------------------------------
def add_listener(registry, listener):
"""
Adds a listener in the registry, if it is not yet in
:param registry: A registry (a list)
:param listener: The listener to register
:return: True if the listener has been added
"""
if listener is None or listener in registry:
return False
registry.append(listener)
return True
def remove_listener(registry, listener):
"""
Removes a listener from the registry
:param registry: A registry (a list)
:param listener: The listener to remove
:return: True if the listener was in the list
"""
if listener is not None and listener in registry:
registry.remove(listener)
return True
return False
# ------------------------------------------------------------------------------
if PYTHON_3:
# Python 3 interpreter : bytes & str
def is_string(string):
"""
Utility method to test if the given parameter is a string
(Python 2.x, 3.x) or a unicode (Python 2.x) object
:param string: A potential string object
:return: True if the given object is a string object or a Python 2.6
unicode object
"""
# Python 3 only have the str string type
return isinstance(string, str)
def to_bytes(data, encoding="UTF-8"):
"""
Converts the given string to an array of bytes.
Returns the first parameter if it is already an array of bytes.
:param data: A unicode string
:param encoding: The encoding of data
:return: The corresponding array of bytes
"""
if type(data) is bytes:
# Nothing to do
return data
return data.encode(encoding)
def to_str(data, encoding="UTF-8"):
"""
Converts the given parameter to a string.
Returns the first parameter if it is already an instance of ``str``.
:param data: A string
:param encoding: The encoding of data
:return: The corresponding string
"""
if type(data) is str:
# Nothing to do
return data
return str(data, encoding)
# Same operation
# pylint: disable=C0103
to_unicode = to_str
else:
# Python 2 interpreter : str & unicode
def is_string(string):
"""
Utility method to test if the given parameter is a string
(Python 2.x, 3.x) or a unicode (Python 2.x) object
:param string: A potential string object
:return: True if the given object is a string object or a Python 2.6
unicode object
"""
        # Python 2 also has the unicode type
return isinstance(string, (str, unicode))
def to_str(data, encoding="UTF-8"):
"""
Converts the given parameter to a string.
Returns the first parameter if it is already an instance of ``str``.
:param data: A string
:param encoding: The encoding of data
:return: The corresponding string
"""
if type(data) is str:
# Nothing to do
return data
return data.encode(encoding)
# Same operation
# pylint: disable=C0103
to_bytes = to_str
def to_unicode(data, encoding="UTF-8"):
"""
        Converts the given string to a unicode string using ``str.decode()``.
Returns the first parameter if it is already an instance of
``unicode``.
:param data: A string
:param encoding: The encoding of data
:return: The corresponding ``unicode`` string
"""
if type(data) is unicode:
# Nothing to do
return data
return data.decode(encoding)
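# A minimal sketch of the conversion helpers defined above: each one is a
# no-op when the value already has the requested type and otherwise encodes
# or decodes using UTF-8 by default (same behaviour on Python 2 and 3).
def _example_string_conversions():
    raw = to_bytes("hello")          # text -> bytes (unchanged str on Python 2)
    assert to_bytes(raw) is raw      # already bytes: returned as-is
    text = to_str(raw)               # bytes -> native str
    assert is_string(text) and is_string(to_unicode(raw))
    return text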
# ------------------------------------------------------------------------------
def to_iterable(value, allow_none=True):
"""
Tries to convert the given value to an iterable, if necessary.
If the given value is a list, a list is returned; if it is a string, a list
containing one string is returned, ...
:param value: Any object
:param allow_none: If True, the method returns None if value is None, else
it returns an empty list
:return: A list containing the given string, or the given value
"""
if value is None:
# None given
if allow_none:
return None
else:
return []
elif isinstance(value, (list, tuple, set, frozenset)):
# Iterable given, return it as-is
return value
# Return a one-value list
return [value]
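# A minimal sketch of the conversion rules implemented by to_iterable().
def _example_to_iterable_usage():
    assert to_iterable("spec") == ["spec"]            # single value -> one-item list
    assert to_iterable(["a", "b"]) == ["a", "b"]      # iterables returned as-is
    assert to_iterable(None) is None                  # None allowed by default
    assert to_iterable(None, allow_none=False) == []  # ... or mapped to an empty list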
# ------------------------------------------------------------------------------
class EventData(object):
"""
A threading event with some associated data
"""
def __init__(self):
"""
Sets up the event
"""
self.__event = threading.Event()
self.__data = None
self.__exception = None
@property
def data(self):
"""
Returns the associated value
"""
return self.__data
@property
def exception(self):
"""
Returns the exception used to stop the wait() method
"""
return self.__exception
def clear(self):
"""
Clears the event
"""
self.__event.clear()
self.__data = None
self.__exception = None
def is_set(self):
"""
Checks if the event is set
"""
return self.__event.is_set()
def set(self, data=None):
"""
Sets the event
"""
self.__data = data
self.__exception = None
self.__event.set()
def raise_exception(self, exception):
"""
Raises an exception in wait()
:param exception: An Exception object
"""
self.__data = None
self.__exception = exception
self.__event.set()
def wait(self, timeout=None):
"""
Waits for the event or for the timeout
:param timeout: Wait timeout (in seconds)
        :return: True if the event has been set, else False
"""
# The 'or' part is for Python 2.6
result = self.__event.wait(timeout) or self.__event.is_set()
# pylint: disable=E0702
# Pylint seems to miss the "is None" check below
if self.__exception is None:
return result
else:
raise self.__exception
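# A minimal usage sketch for EventData: one thread publishes a value while
# another waits for it; only the public API defined above is used.
def _example_event_data_usage():
    result = EventData()
    received = None
    def _producer():
        result.set("payload")
    worker = threading.Thread(target=_producer)
    worker.start()
    if result.wait(timeout=1):
        received = result.data  # -> "payload"
    worker.join()
    return received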
class CountdownEvent(object):
"""
Sets up an Event once the internal integer reaches 0
(kind of the opposite of a semaphore)
"""
def __init__(self, value):
"""
Sets up the counter
:param value: The initial value of the counter, which must be greater
than 0.
:raise ValueError: The value is not greater than 0
"""
if value <= 0:
raise ValueError("Initial value is not greater than 0")
self.__lock = threading.Lock()
self.__value = value
self.__event = threading.Event()
def is_set(self):
"""
Checks if the event is set
"""
return self.__event.is_set()
def step(self):
"""
Decreases the internal counter. Raises an error if the counter goes
below 0
:return: True if this step was the final one, else False
:raise ValueError: The counter has gone below 0
"""
with self.__lock:
self.__value -= 1
if self.__value == 0:
# All done
self.__event.set()
return True
elif self.__value < 0:
# Gone too far
raise ValueError("The counter has gone below 0")
return False
def wait(self, timeout=None):
"""
Waits for the event or for the timeout
:param timeout: Wait timeout (in seconds)
        :return: True if the event has been set, else False
"""
# The 'or' part is for Python 2.6
return self.__event.wait(timeout) or self.__event.is_set()
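# A minimal usage sketch for CountdownEvent: wait until three workers have
# each called step(); the internal event is set when the counter reaches 0.
def _example_countdown_event_usage():
    countdown = CountdownEvent(3)
    def _worker():
        countdown.step()
    workers = [threading.Thread(target=_worker) for _ in range(3)]
    for worker in workers:
        worker.start()
    finished = countdown.wait(timeout=1)  # True once all steps were taken
    for worker in workers:
        worker.join()
    return finished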
|
Duoxilian/home-assistant
|
refs/heads/dev
|
homeassistant/components/wemo.py
|
5
|
"""
Support for WeMo device discovery.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/wemo/
"""
import logging
import voluptuous as vol
from homeassistant.components.discovery import SERVICE_WEMO
from homeassistant.helpers import discovery
from homeassistant.helpers import config_validation as cv
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
REQUIREMENTS = ['pywemo==0.4.13']
DOMAIN = 'wemo'
# Mapping from Wemo model_name to component.
WEMO_MODEL_DISPATCH = {
'Bridge': 'light',
'Insight': 'switch',
'Maker': 'switch',
'Sensor': 'binary_sensor',
'Socket': 'switch',
'LightSwitch': 'switch',
'CoffeeMaker': 'switch'
}
SUBSCRIPTION_REGISTRY = None
KNOWN_DEVICES = []
_LOGGER = logging.getLogger(__name__)
CONF_STATIC = 'static'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_STATIC, default=[]): vol.Schema([cv.string])
}),
}, extra=vol.ALLOW_EXTRA)
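# An illustrative configuration.yaml entry matching CONFIG_SCHEMA above; the
# addresses are made-up examples of statically configured devices:
#
#   wemo:
#     static:
#       - 192.168.1.23
#       - 192.168.1.24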
# pylint: disable=unused-argument, too-many-function-args
def setup(hass, config):
"""Common setup for WeMo devices."""
import pywemo
global SUBSCRIPTION_REGISTRY
SUBSCRIPTION_REGISTRY = pywemo.SubscriptionRegistry()
SUBSCRIPTION_REGISTRY.start()
def stop_wemo(event):
"""Shutdown Wemo subscriptions and subscription thread on exit."""
_LOGGER.info("Shutting down subscriptions.")
SUBSCRIPTION_REGISTRY.stop()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_wemo)
def discovery_dispatch(service, discovery_info):
"""Dispatcher for WeMo discovery events."""
        # name, model, location, mac, serial
_, model_name, _, _, serial = discovery_info
# Only register a device once
if serial in KNOWN_DEVICES:
return
_LOGGER.debug('Discovered unique device %s', serial)
KNOWN_DEVICES.append(serial)
component = WEMO_MODEL_DISPATCH.get(model_name, 'switch')
discovery.load_platform(hass, component, DOMAIN, discovery_info,
config)
discovery.listen(hass, SERVICE_WEMO, discovery_dispatch)
_LOGGER.info("Scanning for WeMo devices.")
devices = [(device.host, device) for device in pywemo.discover_devices()]
# Add static devices from the config file.
devices.extend((address, None)
for address in config.get(DOMAIN, {}).get(CONF_STATIC, []))
for address, device in devices:
port = pywemo.ouimeaux_device.probe_wemo(address)
if not port:
_LOGGER.warning('Unable to probe wemo at %s', address)
continue
_LOGGER.info('Adding wemo at %s:%i', address, port)
url = 'http://%s:%i/setup.xml' % (address, port)
if device is None:
device = pywemo.discovery.device_from_description(url, None)
discovery_info = (device.name, device.model_name, url, device.mac,
device.serialnumber)
discovery.discover(hass, SERVICE_WEMO, discovery_info)
return True
|
dtrip/powerline-shell
|
refs/heads/master
|
segments/ruby_version.py
|
20
|
import os
import subprocess
# Note: `powerline` is expected to be defined by the surrounding powerline-shell
# script; the installer concatenates the segment files into that single script.
def add_ruby_version_segment():
    try:
        p1 = subprocess.Popen(["ruby", "-v"], stdout=subprocess.PIPE)
        p2 = subprocess.Popen(["sed", "s/ (.*//"], stdin=p1.stdout, stdout=subprocess.PIPE)
        version = p2.communicate()[0].rstrip()
        # RVM-style gemsets expose the active gemset through GEM_HOME
        # (e.g. ".../ruby-2.1.0@mygemset"); append its name when present.
        if "GEM_HOME" in os.environ:
            gem = os.environ["GEM_HOME"].split("@")
            if len(gem) > 1:
                version += " " + gem[1]
        powerline.append(version, 15, 1)
    except OSError:
        return
add_ruby_version_segment()
|
ghchinoy/tensorflow
|
refs/heads/master
|
tensorflow/python/keras/engine/training_test.py
|
1
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for training routines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import logging
import sys
from absl.testing import parameterized
import numpy as np
import six
from tensorflow.python import keras
from tensorflow.python import tf2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import losses
from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.callbacks import Callback
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.rmsprop import RMSPropOptimizer
try:
import scipy.sparse as scipy_sparse # pylint: disable=g-import-not-at-top
except ImportError:
scipy_sparse = None
class CompileTest(keras_parameterized.TestCase):
def _get_multi_output_model(self):
input_a = keras.layers.Input(shape=(3,), name='input_a')
output_a = keras.layers.Dense(1, name='dense_1')(input_a)
output_b = keras.layers.Dense(1, name='dense_2')(input_a)
return keras.models.Model(input_a, [output_a, output_b])
def _do_test_compile_with_model_and_single_loss(self, model, loss):
model.compile(
optimizer='adam',
loss=loss,
run_eagerly=testing_utils.should_run_eagerly())
self.assertEqual(model.loss, loss)
loss = losses.get(loss)
if not isinstance(loss, list):
loss_list = [loss] * len(model.outputs)
self.assertEqual(len(model.loss_functions), len(loss_list))
for i in range(len(loss_list)):
self.assertIsInstance(model.loss_functions[i], losses.LossFunctionWrapper)
if not isinstance(loss_list[i], losses.LossFunctionWrapper):
self.assertEqual(model.loss_functions[i].fn, loss_list[i])
self.assertAllEqual(model._loss_weights_list, [1.] * len(loss_list))
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters(('loss_string', 'mse'),
('loss_function', losses.mean_squared_error),
('loss_instance', losses.MeanSquaredError()))
def test_compile_with_single_output(self, loss):
model = testing_utils.get_small_sequential_mlp(
num_hidden=10, num_classes=2, input_dim=3)
self._do_test_compile_with_model_and_single_loss(model, loss)
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters(('loss_string', 'mse'),
('loss_function', losses.mean_squared_error),
('loss_instance', losses.MeanSquaredError()))
def test_compile_with_multi_output(self, loss):
model = self._get_multi_output_model()
self._do_test_compile_with_model_and_single_loss(model, loss)
@keras_parameterized.run_all_keras_modes
def test_compile_with_multi_output_and_multi_loss(self):
model = self._get_multi_output_model()
# Test loss is a list.
loss = ['mse', 'mae']
model.compile(
optimizer='adam',
loss=loss,
run_eagerly=testing_utils.should_run_eagerly())
self.assertEqual(model.loss_functions[0].fn, losses.mean_squared_error)
self.assertEqual(model.loss_functions[1].fn, losses.mean_absolute_error)
self.assertAllEqual(model._loss_weights_list, [1., 1.])
# Test loss is a dict.
loss = {'dense_1': 'mae', 'dense_2': 'mse'}
model.compile(
optimizer='adam',
loss=loss,
run_eagerly=testing_utils.should_run_eagerly())
self.assertEqual(model.loss_functions[0].fn, losses.mean_absolute_error)
self.assertEqual(model.loss_functions[1].fn, losses.mean_squared_error)
self.assertAllEqual(model._loss_weights_list, [1., 1.])
@keras_parameterized.run_all_keras_modes
def test_compile_with_multi_output_and_loss_weights_list(self):
model = self._get_multi_output_model()
loss_weights = [1., 2.]
model.compile(
optimizer='adam',
loss='mse',
loss_weights=loss_weights,
run_eagerly=testing_utils.should_run_eagerly())
self.assertAllEqual(model._loss_weights_list, [1., 2.])
def test_compile_with_multi_output_and_loss_weights_dict(self):
with context.graph_mode():
model = self._get_multi_output_model()
loss_weights = {'dense_1': 1., 'dense_2': 2.}
model.compile(optimizer='adam', loss='mse', loss_weights=loss_weights)
self.assertAllEqual(model._loss_weights_list, [1., 2.])
input_np = np.random.random((10, 3))
output_a_np = np.random.random((10, 1))
output_b_np = np.random.random((10, 1))
with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
total_loss, y_preds = sess.run(
[model.total_loss, model.outputs],
feed_dict={
'input_a:0': input_np,
'dense_1_target:0': output_a_np,
'dense_2_target:0': output_b_np
})
self.assertAllClose(
total_loss,
np.mean(
np.add((output_a_np - y_preds[0])**2,
2 * (output_b_np - y_preds[1])**2)))
@keras_parameterized.run_all_keras_modes
def test_compile_with_incorrect_loss_size(self):
model = testing_utils.get_small_sequential_mlp(
num_hidden=10, num_classes=2, input_dim=3)
with self.assertRaisesRegexp(ValueError, 'The model has 1 outputs'):
model.compile(
optimizer='adam',
loss=['mse', 'mae'],
run_eagerly=testing_utils.should_run_eagerly())
@keras_parameterized.run_all_keras_modes
def test_compile_with_incorrect_loss_key(self):
model = testing_utils.get_small_sequential_mlp(
num_hidden=10, num_classes=2, input_dim=3)
with self.assertRaisesRegexp(
ValueError,
r'Unknown entries in loss dictionary: \[\'unknown_output\'\]. '
r'Only expected following keys: \[\'dense_1\'\]'):
model.compile(
optimizer='adam',
loss={'unknown_output': 'mse'},
run_eagerly=testing_utils.should_run_eagerly())
@keras_parameterized.run_all_keras_modes
def test_compile_with_incorrect_loss_weights_size(self):
model = testing_utils.get_small_sequential_mlp(
num_hidden=10, num_classes=2, input_dim=3)
with self.assertRaisesRegexp(ValueError,
'it should have one entry per model output'):
model.compile(
optimizer='adam',
loss='mse',
loss_weights=[1., 2.],
run_eagerly=testing_utils.should_run_eagerly())
@keras_parameterized.run_all_keras_modes
def test_compile_with_incorrect_loss_weights_key(self):
model = testing_utils.get_small_sequential_mlp(
num_hidden=10, num_classes=2, input_dim=3)
with self.assertRaisesRegexp(
ValueError,
r'Unknown entries in loss_weights dictionary: \[\'unknown_output\'\]. '
r'Only expected following keys: \[\'dense_1\'\]'):
model.compile(
optimizer='adam',
loss='mse',
loss_weights={'unknown_output': 1.},
run_eagerly=testing_utils.should_run_eagerly())
@keras_parameterized.run_all_keras_modes
def test_compile_with_incorrect_sample_weight_mode(self):
model = testing_utils.get_small_sequential_mlp(
num_hidden=10, num_classes=2, input_dim=3)
with self.assertRaisesRegexp(
ValueError,
r'Unknown entries in sample_weight_mode dictionary: \[\'unknown\'\]. '
r'Only expected following keys: \[\'dense_1\'\]'):
model.compile(
optimizer='adam',
loss='mse',
sample_weight_mode={'unknown': 'temporal'},
run_eagerly=testing_utils.should_run_eagerly())
class TrainingTest(keras_parameterized.TestCase):
@keras_parameterized.run_with_all_model_types(exclude_models='sequential')
@keras_parameterized.run_all_keras_modes
def test_fit_on_arrays(self):
input_a = keras.layers.Input(shape=(3,), name='input_a')
input_b = keras.layers.Input(shape=(3,), name='input_b')
dense = keras.layers.Dense(4, name='dense')
dropout = keras.layers.Dropout(0.5, name='dropout')
branch_a = [input_a, dense]
branch_b = [input_b, dense, dropout]
model = testing_utils.get_multi_io_model(branch_a, branch_b)
optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
loss_weights = [1., 0.5]
model.compile(
optimizer,
loss,
metrics=[metrics_module.CategoricalAccuracy(), 'mae'],
loss_weights=loss_weights,
run_eagerly=testing_utils.should_run_eagerly())
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_d_np = np.random.random((10, 4))
output_e_np = np.random.random((10, 4))
# Test fit at different verbosity
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=1,
batch_size=5,
verbose=0)
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=1,
batch_size=5,
verbose=1)
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=2,
batch_size=5,
verbose=2)
model.train_on_batch([input_a_np, input_b_np], [output_d_np, output_e_np])
# Test model with input data as a list of lists
model.fit(
[np.ndarray.tolist(input_a_np), np.ndarray.tolist(input_b_np)],
[output_d_np, output_e_np],
epochs=2,
batch_size=5,
verbose=2)
# Test with validation data
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
validation_data=([input_a_np, input_b_np], [output_d_np,
output_e_np]),
epochs=1,
batch_size=5,
verbose=0)
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
validation_data=([input_a_np, input_b_np], [output_d_np,
output_e_np]),
epochs=2,
batch_size=5,
verbose=1)
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
validation_data=([input_a_np, input_b_np], [output_d_np,
output_e_np]),
epochs=2,
batch_size=5,
verbose=2)
# Test with validation split
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=2,
batch_size=5,
verbose=0,
validation_split=0.2)
if testing_utils.get_model_type() == 'functional':
# Test with dictionary inputs
model.fit(
{
'input_a': input_a_np,
'input_b': input_b_np
}, {
'dense': output_d_np,
'dropout': output_e_np
},
epochs=1,
batch_size=5,
verbose=0)
model.fit(
{
'input_a': input_a_np,
'input_b': input_b_np
}, {
'dense': output_d_np,
'dropout': output_e_np
},
epochs=1,
batch_size=5,
verbose=1)
model.fit(
{
'input_a': input_a_np,
'input_b': input_b_np
}, {
'dense': output_d_np,
'dropout': output_e_np
},
validation_data=({
'input_a': input_a_np,
'input_b': input_b_np
}, {
'dense': output_d_np,
'dropout': output_e_np
}),
epochs=1,
batch_size=5,
verbose=0)
model.train_on_batch({
'input_a': input_a_np,
'input_b': input_b_np
}, {
'dense': output_d_np,
'dropout': output_e_np
})
# Test with lists for loss, metrics
loss = ['mae', 'mse']
model.compile(
optimizer,
loss,
metrics=[metrics_module.CategoricalAccuracy(), 'mae'],
run_eagerly=testing_utils.should_run_eagerly())
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=1,
batch_size=5,
verbose=0)
# Test with dictionaries for loss, metrics, loss weights
if testing_utils.get_model_type() == 'functional':
loss = {'dense': 'mse', 'dropout': 'mae'}
loss_weights = {'dense': 1., 'dropout': 0.5}
metrics = {
'dense': 'mse',
'dropout': metrics_module.CategoricalAccuracy()
}
model.compile(optimizer, loss, metrics=metrics, loss_weights=loss_weights,
run_eagerly=testing_utils.should_run_eagerly())
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=1,
batch_size=5,
verbose=0)
# Invalid use cases
with self.assertRaises(ValueError):
model.train_on_batch({'input_a': input_a_np},
[output_d_np, output_e_np])
with self.assertRaises(AttributeError):
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=1,
validation_data=([input_a_np, input_b_np], 0, 0),
verbose=0)
with self.assertRaises(ValueError):
model.train_on_batch([input_a_np], [output_d_np, output_e_np])
with self.assertRaises(AttributeError):
model.train_on_batch(1, [output_d_np, output_e_np])
with self.assertRaises(ValueError):
model.train_on_batch(input_a_np, [output_d_np, output_e_np])
with self.assertRaises(ValueError):
bad_input = np.random.random((11, 3))
model.train_on_batch([bad_input, input_b_np],
[output_d_np, output_e_np])
with self.assertRaises(ValueError):
bad_target = np.random.random((11, 4))
model.train_on_batch([input_a_np, input_b_np],
[bad_target, output_e_np])
# Build single-input model
x = keras.layers.Input(shape=(3,), name='input_a')
y = keras.layers.Dense(4)(x)
model = keras.models.Model(x, y)
model.compile(optimizer, loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
# This will work
model.fit([input_a_np], output_d_np, epochs=1)
# TODO(gsundeep) Test only works in eager, file ticket
if testing_utils.should_run_eagerly() and context.executing_eagerly():
with self.assertRaises(ValueError):
model.fit([input_a_np, input_a_np], output_d_np, epochs=1)
# Test model on a list of floats
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 4))
model.fit([np.ndarray.tolist(input_a_np)],
[np.ndarray.tolist(input_b_np)],
epochs=2,
batch_size=5,
verbose=2)
@keras_parameterized.run_all_keras_modes
def test_evaluate_predict_on_arrays(self):
a = keras.layers.Input(shape=(3,), name='input_a')
b = keras.layers.Input(shape=(3,), name='input_b')
dense = keras.layers.Dense(4, name='dense')
c = dense(a)
d = dense(b)
e = keras.layers.Dropout(0.5, name='dropout')(c)
model = keras.models.Model([a, b], [d, e])
optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
loss_weights = [1., 0.5]
model.compile(
optimizer,
loss,
metrics=['mae', metrics_module.CategoricalAccuracy()],
loss_weights=loss_weights,
sample_weight_mode=None,
run_eagerly=testing_utils.should_run_eagerly())
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_d_np = np.random.random((10, 4))
output_e_np = np.random.random((10, 4))
# Test evaluate at different verbosity
out = model.evaluate(
[input_a_np, input_b_np], [output_d_np, output_e_np],
batch_size=5,
verbose=0)
self.assertEqual(len(out), 7)
out = model.evaluate(
[input_a_np, input_b_np], [output_d_np, output_e_np],
batch_size=5,
verbose=1)
self.assertEqual(len(out), 7)
out = model.evaluate(
[input_a_np, input_b_np], [output_d_np, output_e_np],
batch_size=5,
verbose=2)
self.assertEqual(len(out), 7)
out = model.test_on_batch([input_a_np, input_b_np],
[output_d_np, output_e_np])
self.assertEqual(len(out), 7)
# Test evaluate with dictionary inputs
model.evaluate(
{
'input_a': input_a_np,
'input_b': input_b_np
}, {
'dense': output_d_np,
'dropout': output_e_np
},
batch_size=5,
verbose=0)
model.evaluate(
{
'input_a': input_a_np,
'input_b': input_b_np
}, {
'dense': output_d_np,
'dropout': output_e_np
},
batch_size=5,
verbose=1)
# Test predict
out = model.predict([input_a_np, input_b_np], batch_size=5)
self.assertEqual(len(out), 2)
out = model.predict({'input_a': input_a_np, 'input_b': input_b_np})
self.assertEqual(len(out), 2)
out = model.predict_on_batch({
'input_a': input_a_np,
'input_b': input_b_np
})
self.assertEqual(len(out), 2)
@keras_parameterized.run_all_keras_modes
@keras_parameterized.run_with_all_model_types
def test_activity_regularizer_fit(self):
loss = {}
for reg in [None, 'l2']:
layers = [
keras.layers.Dense(
10, activation='relu', activity_regularizer=reg,
kernel_initializer='ones', use_bias=False),
keras.layers.Dense(
1, activation='sigmoid', kernel_initializer='ones',
use_bias=False),
]
model = testing_utils.get_model_from_layers(
layers, input_shape=(10,))
x = np.ones((10, 10), 'float32')
y = np.ones((10, 1), 'float32')
optimizer = RMSPropOptimizer(learning_rate=0.001)
model.compile(optimizer, 'binary_crossentropy',
run_eagerly=testing_utils.should_run_eagerly())
model.fit(x, y, batch_size=2, epochs=5)
loss[reg] = model.evaluate(x, y)
self.assertLess(loss[None], loss['l2'])
@keras_parameterized.run_all_keras_modes
@keras_parameterized.run_with_all_model_types
def test_activity_regularizer_loss_value(self):
layer = keras.layers.Dense(
1, kernel_initializer=keras.initializers.zeros(),
bias_initializer=keras.initializers.ones(), activity_regularizer='l2')
model = testing_utils.get_model_from_layers([layer], input_shape=(10,))
x = np.ones((10, 10), 'float32')
y = np.ones((10, 1), 'float32')
optimizer = RMSPropOptimizer(learning_rate=0.001)
model.compile(optimizer, 'binary_crossentropy',
run_eagerly=testing_utils.should_run_eagerly())
loss = model.test_on_batch(x, y)
self.assertAlmostEqual(0.01, loss, places=4)
@keras_parameterized.run_all_keras_modes
def test_activity_regularizer_batch_independent(self):
inputs = keras.layers.Input(shape=(10,))
x = keras.layers.Dense(
10, activation='relu', activity_regularizer='l2')(
inputs)
outputs = keras.layers.Dense(1, activation='sigmoid')(x)
model = keras.Model(inputs, outputs)
optimizer = RMSPropOptimizer(learning_rate=0.001)
model.compile(optimizer, 'binary_crossentropy',
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones((10, 10), 'float32')
y = np.ones((10, 1), 'float32')
loss_small_batch = model.test_on_batch(x, y)
x2 = np.ones((20, 10), 'float32')
y2 = np.ones((20, 1), 'float32')
loss_big_batch = model.test_on_batch(x2, y2)
self.assertAlmostEqual(loss_small_batch, loss_big_batch, places=4)
@keras_parameterized.run_all_keras_modes
def test_activity_regularizer_in_model_call(self):
class MyModel(keras.Model):
def call(self, inputs):
self.add_loss(inputs)
return inputs
x = ops.convert_to_tensor(1.)
model = MyModel()
_ = model(x)
self.assertEqual(1, len(model.losses))
@keras_parameterized.run_all_keras_modes
def test_custom_mapping_in_config(self):
class MyModel(keras.Model):
def call(self, inputs):
return inputs
def get_config(self):
self.a = {}
return {'a': self.a}
model = MyModel()
self.assertIn('{"a": {}}', model.to_json())
@keras_parameterized.run_all_keras_modes
def test_training_on_sparse_data_with_dense_placeholders(self):
# TODO(kaftan) Test seems to not work, file ticket
if testing_utils.should_run_eagerly() and context.executing_eagerly():
self.skipTest('Skipping running model eagerly.')
if scipy_sparse is None:
return
test_inputs = [
scipy_sparse.random(6, 3, density=0.25).tocsr() for _ in range(2)
]
test_outputs = [
scipy_sparse.random(6, i, density=0.25).tocsr() for i in range(3, 5)
]
in1 = keras.layers.Input(shape=(3,))
in2 = keras.layers.Input(shape=(3,))
out1 = keras.layers.Dropout(0.5, name='dropout')(in1)
out2 = keras.layers.Dense(4, name='dense_1')(in2)
model = keras.Model([in1, in2], [out1, out2])
model.predict(test_inputs, batch_size=2)
optimizer = RMSPropOptimizer(learning_rate=0.001)
model.compile(
optimizer,
'mse',
metrics=['mae', metrics_module.CategoricalAccuracy()],
run_eagerly=testing_utils.should_run_eagerly())
model.fit(test_inputs, test_outputs,
epochs=1, batch_size=2, validation_split=0.5)
model.evaluate(test_inputs, test_outputs, batch_size=2)
@keras_parameterized.run_all_keras_modes
def test_compile_with_sparse_placeholders(self):
# TODO(kaftan) Test seems to not work, file ticket
if testing_utils.should_run_eagerly() and context.executing_eagerly():
self.skipTest('Skipping running model eagerly.')
input_layer = keras.layers.Input(shape=(10,), sparse=True)
weights = variables_lib.Variable(
np.ones((10, 1)).astype(np.float32), name='weights')
weights_mult = lambda x: sparse_ops.sparse_tensor_dense_matmul(x, weights)
output_layer = keras.layers.Lambda(weights_mult)(input_layer)
model = keras.Model([input_layer], output_layer)
model.compile(
loss='binary_crossentropy',
optimizer=keras.optimizers.Adam(lr=0.0001),
metrics=['accuracy'],
run_eagerly=testing_utils.should_run_eagerly())
@keras_parameterized.run_all_keras_modes
def test_that_trainable_disables_updates(self):
val_a = np.random.random((10, 4))
val_out = np.random.random((10, 4))
a = keras.layers.Input(shape=(4,))
layer = keras.layers.BatchNormalization(input_shape=(4,))
b = layer(a)
model = keras.Model(a, b)
model.trainable = False
assert not model.updates
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
assert not model.updates
x1 = model.predict(val_a)
model.train_on_batch(val_a, val_out)
x2 = model.predict(val_a)
self.assertAllClose(x1, x2, atol=1e-7)
model.trainable = True
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
assert model.updates
model.train_on_batch(val_a, val_out)
x2 = model.predict(val_a)
assert np.abs(np.sum(x1 - x2)) > 1e-5
layer.trainable = False
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
assert not model.updates
x1 = model.predict(val_a)
model.train_on_batch(val_a, val_out)
x2 = model.predict(val_a)
self.assertAllClose(x1, x2, atol=1e-7)
def test_logs_passed_to_callbacks(self):
with self.cached_session():
input_dim = 5
num_classes = 1
class TestCallback(Callback):
def __init__(self):
super(TestCallback, self).__init__()
self.epoch_end_logs = None
self.batch_end_logs = None
self.epoch_end_call_count = 0
self.batch_end_call_count = 0
def on_epoch_end(self, epoch, logs=None):
self.epoch_end_logs = logs
self.epoch_end_call_count += 1
def on_batch_end(self, batch, logs=None):
self.batch_end_logs = logs
self.batch_end_call_count += 1
model = testing_utils.get_small_sequential_mlp(
num_hidden=10, num_classes=num_classes, input_dim=input_dim)
model.compile(
loss='binary_crossentropy',
metrics=['acc'],
weighted_metrics=['mae'],
optimizer=RMSPropOptimizer(learning_rate=0.01))
np.random.seed(1337)
(x_train, y_train), (_, _) = testing_utils.get_test_data(
train_samples=10,
test_samples=10,
input_shape=(input_dim,),
num_classes=num_classes)
test_callback = TestCallback()
model.fit(
x_train,
y_train,
batch_size=2,
epochs=2,
verbose=0,
callbacks=[test_callback],
validation_data=(x_train, y_train))
self.assertEqual(test_callback.batch_end_call_count, 10)
self.assertEqual(test_callback.epoch_end_call_count, 2)
weighted_metric = ('mae'
if tf2.enabled() else 'weighted_mean_absolute_error')
self.assertSetEqual(
set(test_callback.batch_end_logs.keys()),
set(['batch', 'size', 'acc', 'loss', weighted_metric]))
self.assertSetEqual(
set(test_callback.epoch_end_logs.keys()),
set([
'acc', 'loss', weighted_metric, 'val_acc', 'val_loss',
'val_' + weighted_metric
]))
@keras_parameterized.run_all_keras_modes
def test_mismatched_output_shape_and_target_shape(self):
model = keras.Sequential([
keras.layers.Dense(2, input_shape=(3, 4)),
keras.layers.Dense(5),
])
model.compile(RMSPropOptimizer(learning_rate=0.001),
loss='sparse_categorical_crossentropy',
run_eagerly=testing_utils.should_run_eagerly())
# Test with Numpy data
x_train = np.random.random((10, 3, 4))
y_train = np.random.randint(0, 5, size=(10, 3))
model.fit(x_train, y_train, batch_size=5, epochs=1)
# Test with iterator
dataset = dataset_ops.Dataset.from_tensor_slices((x_train, y_train))
dataset = dataset.repeat(10)
dataset = dataset.batch(10)
iterator = dataset_ops.make_one_shot_iterator(dataset)
model.fit(iterator, epochs=1, steps_per_epoch=2)
if context.executing_eagerly():
# Test with eager execution
model.compile(RMSPropOptimizer(learning_rate=0.001),
loss='sparse_categorical_crossentropy',
run_eagerly=True)
model.fit(x_train, y_train, batch_size=5, epochs=1)
# Test with eager execution and iterator
model.fit(iterator, epochs=1, steps_per_epoch=2)
def test_losses_in_defun(self):
with context.eager_mode():
layer = keras.layers.Dense(1, kernel_regularizer='l1')
layer(array_ops.ones([1, 10]))
@function.defun
def get_losses():
return layer.losses
self.assertAllEqual(
self.evaluate(layer.losses), self.evaluate(get_losses()))
@keras_parameterized.run_all_keras_modes
def test_logging(self):
mock_stdout = io.BytesIO() if six.PY2 else io.StringIO()
model = keras.models.Sequential()
model.add(keras.layers.Dense(10, activation='relu'))
model.add(keras.layers.Dense(1, activation='sigmoid'))
model.compile(
RMSPropOptimizer(learning_rate=0.001), loss='binary_crossentropy',
run_eagerly=testing_utils.should_run_eagerly())
with test.mock.patch.object(sys, 'stdout', mock_stdout):
model.fit(
np.ones((10, 10), 'float32'), np.ones((10, 1), 'float32'), epochs=10)
self.assertTrue('Epoch 5/10' in mock_stdout.getvalue())
@tf_test_util.run_in_graph_and_eager_modes
def test_training_with_loss_instance(self):
a = keras.layers.Input(shape=(3,), name='input_a')
b = keras.layers.Input(shape=(3,), name='input_b')
dense = keras.layers.Dense(4, name='dense')
c = dense(a)
d = dense(b)
e = keras.layers.Dropout(0.5, name='dropout')(c)
model = keras.models.Model([a, b], [d, e])
loss_weights = [1., 0.5]
model.compile(
RMSPropOptimizer(learning_rate=0.001),
loss=keras.losses.MeanSquaredError(),
metrics=[metrics_module.CategoricalAccuracy(), 'mae'],
loss_weights=loss_weights)
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_d_np = np.random.random((10, 4))
output_e_np = np.random.random((10, 4))
model.fit([input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=1,
batch_size=5)
@tf_test_util.run_in_graph_and_eager_modes
def test_static_batch_in_input_layer(self):
class Counter(keras.callbacks.Callback):
def __init__(self):
self.batches = 0
def on_batch_end(self, batch, logs=None):
self.batches += 1
x, y = np.ones((64, 10), 'float32'), np.ones((64, 1), 'float32')
for batch_size, expected_batches in [(None, 2), (4, 16)]:
inputs = keras.Input(batch_size=batch_size, shape=(10,))
outputs = keras.layers.Dense(1, activation='sigmoid')(inputs)
model = keras.Model(inputs, outputs)
model.compile(keras.optimizer_v2.adam.Adam(0.001), 'binary_crossentropy')
counter = Counter()
model.fit(x, y, callbacks=[counter])
self.assertEqual(counter.batches, expected_batches)
model = keras.Sequential(
[keras.layers.Dense(1, batch_input_shape=(batch_size, 10))])
model.compile(keras.optimizer_v2.adam.Adam(0.001), 'binary_crossentropy')
counter = Counter()
model.fit(x, y, callbacks=[counter])
self.assertEqual(counter.batches, expected_batches)
@tf_test_util.run_in_graph_and_eager_modes
def test_static_batch_in_input_layer_consistency_checks(self):
x, y = np.ones((64, 10), 'float32'), np.ones((64, 1), 'float32')
inputs = keras.Input(batch_size=2, shape=(10,))
outputs = keras.layers.Dense(1, activation='sigmoid')(inputs)
model = keras.Model(inputs, outputs)
model.compile(keras.optimizer_v2.adam.Adam(0.001), 'binary_crossentropy')
with self.assertRaisesRegexp(ValueError,
'incompatible with the specified batch size'):
model.fit(x, y, batch_size=4)
data = dataset_ops.DatasetV2.from_tensor_slices((x, y))
data = data.batch(4, drop_remainder=True)
with self.assertRaisesRegexp(ValueError,
'incompatible with the specified batch size'):
model.fit(data, steps_per_epoch=16)
@tf_test_util.run_in_graph_and_eager_modes
def test_compatible_batch_size_functional_model(self):
class MyLayer(keras.layers.Layer):
def call(self, inputs):
return array_ops.concat(inputs, axis=0)
input1 = keras.Input(batch_size=2, shape=(10,))
input2 = keras.Input(batch_size=3, shape=(10,))
outputs = MyLayer()([input1, input2])
with self.assertRaisesRegexp(ValueError,
'specified batch sizes of the Input Layers'):
keras.Model([input1, input2], outputs)
@tf_test_util.run_in_graph_and_eager_modes
def test_calling_subclass_model_on_different_datasets(self):
class SubclassedModel(keras.models.Model):
def call(self, inputs):
return inputs * 2
model = SubclassedModel()
dataset_one = dataset_ops.Dataset.range(2).batch(2)
dataset_two = dataset_ops.Dataset.range(3, 10).batch(2)
self.assertAllEqual([[0], [2]], model.predict(dataset_one, steps=1))
self.assertAllEqual([[6], [8], [10], [12]],
model.predict(dataset_two, steps=2))
def test_training_on_sparse_categorical_crossentropy_loss_with_softmax(self):
with context.eager_mode():
np.random.seed(1337)
train_x = np.ones((100, 4))
train_y = np.random.randint(0, 1, size=(100, 1))
reference_model = testing_utils.get_small_sequential_mlp(16, 2,
input_dim=4)
reference_model.compile(loss='sparse_categorical_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001),
run_eagerly=True)
fixed_weights = reference_model.get_weights()
reference_model_loss = reference_model.train_on_batch(train_x, train_y)
test_model = testing_utils.get_small_sequential_mlp(16, 2, input_dim=4)
test_model.compile(loss='sparse_categorical_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001),
run_eagerly=False)
test_model.set_weights(fixed_weights)
test_model_loss = test_model.train_on_batch(train_x, train_y)
self.assertAlmostEqual(test_model_loss, reference_model_loss, places=4)
def test_training_on_categorical_crossentropy_loss_with_softmax(self):
with context.eager_mode():
np.random.seed(1337)
train_x = np.ones((100, 4))
train_y = keras.utils.to_categorical(np.random.randint(0, 1,
size=(100, 1)), 2)
reference_model = testing_utils.get_small_sequential_mlp(16, 2,
input_dim=4)
reference_model.compile(loss='categorical_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001),
run_eagerly=True)
fixed_weights = reference_model.get_weights()
reference_model_loss = reference_model.train_on_batch(train_x, train_y)
test_model = testing_utils.get_small_sequential_mlp(16, 2, input_dim=4)
test_model.compile(loss='categorical_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001),
run_eagerly=False)
test_model.set_weights(fixed_weights)
test_model_loss = test_model.train_on_batch(train_x, train_y)
self.assertAlmostEqual(test_model_loss, reference_model_loss, places=4)
def test_training_on_binary_crossentropy_loss(self):
with context.eager_mode():
train_x = np.ones((100, 4), dtype=np.float32)
train_y = np.ones((100, 1), dtype=np.float32)
reference_model = testing_utils.get_small_sequential_mlp(16, 1,
input_dim=4)
reference_model.compile(loss='binary_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001),
run_eagerly=True)
fixed_weights = reference_model.get_weights()
reference_model_loss = reference_model.train_on_batch(train_x, train_y)
test_model = testing_utils.get_small_sequential_mlp(16, 1, input_dim=4)
test_model.compile(loss='binary_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001),
run_eagerly=False)
test_model.set_weights(fixed_weights)
test_model_loss = test_model.train_on_batch(train_x, train_y)
self.assertAlmostEqual(test_model_loss, reference_model_loss, places=4)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters(
('default', 1, 4), ('integer_two', 2, 2), ('integer_four', 4, 1),
('simple_list', [1, 3, 4], 3), ('duplicated_list', [4, 2, 2], 2))
def test_validation_freq(self, validation_freq, expected_runs):
x, y = np.ones((10, 10)), np.ones((10, 1))
model = testing_utils.get_small_mlp(2, 1, 10)
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
class ValCounter(keras.callbacks.Callback):
def __init__(self):
self.val_runs = 0
def on_test_begin(self, logs=None):
self.val_runs += 1
val_counter = ValCounter()
model.fit(
x,
y,
epochs=4,
validation_data=(x, y),
validation_freq=validation_freq,
callbacks=[val_counter])
self.assertEqual(val_counter.val_runs, expected_runs)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_validation_steps_without_data(self):
x, y = np.ones((10, 10)), np.ones((10, 1))
model = testing_utils.get_small_mlp(2, 1, 10)
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
with self.assertRaisesRegexp(
ValueError, '`validation_steps` should not be specified if '
'`validation_data` is None.'):
model.fit(x, y, epochs=4, validation_data=None, validation_steps=3)
@keras_parameterized.run_all_keras_modes
def test_add_loss_correctness(self):
class Bias(keras.layers.Layer):
def build(self, input_shape):
self.bias = self.add_variable('bias', (1,), initializer='zeros')
def call(self, inputs):
return inputs + self.bias
inputs = keras.Input(shape=(1,))
targets = keras.Input(shape=(1,))
outputs = Bias()(inputs)
model = keras.Model([inputs, targets], outputs)
model.add_loss(2 * math_ops.reduce_mean(
keras.losses.mean_absolute_error(targets, outputs)))
model.add_loss(keras.losses.MeanAbsoluteError()(targets, outputs))
model.compile(
keras.optimizer_v2.gradient_descent.SGD(0.025),
loss=keras.losses.MeanAbsoluteError(),
run_eagerly=testing_utils.should_run_eagerly())
x = np.array([[0.], [1.], [2.]])
y = np.array([[0.5], [2.], [3.5]])
history = model.fit([x, y], y, batch_size=3, epochs=5)
self.assertAllClose(history.history['loss'], [4., 3.6, 3.2, 2.8, 2.4], 1e-3)
@keras_parameterized.run_all_keras_modes
def test_unconditional_add_loss_correctness(self):
class MyLayer(keras.layers.Layer):
def call(self, inputs, training=None):
# Reachable from the inputs but marked as unconditional.
self.add_loss(math_ops.reduce_sum(inputs))
return inputs
inputs = keras.Input((3,))
layer = MyLayer()
outputs = layer(inputs)
model = keras.Model(inputs, outputs)
self.assertEqual(len(model.losses), 1)
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
loss = model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
self.assertEqual(loss, 2 * 3)
@keras_parameterized.run_all_keras_modes
def test_clear_losses(self):
class LayerWithSharedNestedLossLayer(keras.layers.Layer):
def __init__(self):
super(LayerWithSharedNestedLossLayer, self).__init__()
self.loss_layer = keras.layers.ActivityRegularization(l2=0.001)
self.add_weight(shape=(1,), regularizer='l2')
def call(self, x):
x = self.loss_layer(x)
return self.loss_layer(x)
inputs = keras.Input(shape=(1,))
outputs = LayerWithSharedNestedLossLayer()(inputs)
model = keras.Model(inputs, outputs)
# Weight loss + 2 activity losses.
self.assertEqual(len(model.losses), 3)
x = array_ops.ones((1, 1))
model(x)
y = array_ops.ones((1, 1))
model(y)
if context.executing_eagerly():
# Eager losses are cleared every `__call__`.
self.assertEqual(len(model.losses), 3)
else:
self.assertEqual(len(model.get_losses_for(x)), 2)
self.assertEqual(len(model.get_losses_for(y)), 2)
self.assertEqual(len(model.get_losses_for(None)), 1)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_layer_with_variable_output(self):
class VariableOutputLayer(keras.layers.Layer):
def build(self, input_shape):
self.v = self.add_weight('output_var', shape=(2, 5), initializer='ones')
def call(self, inputs):
return self.v
model = testing_utils.get_model_from_layers(
[VariableOutputLayer(), keras.layers.Dense(1)], input_shape=(10,))
# TODO(omalleyt): Make this work with `run_eagerly=True`.
model.compile('sgd', 'mse', run_eagerly=False)
model.fit(np.ones((10, 10)), np.ones((10, 1)), batch_size=2, epochs=5)
self.assertLen(model.trainable_variables, 3)
# TODO(b/131372221): Make this work with subclassed models.
@keras_parameterized.run_with_all_model_types(exclude_models=['subclass'])
@keras_parameterized.run_all_keras_modes
def test_model_dtype(self):
class AssertTypeLayer(keras.layers.Layer):
def __init__(self, assert_type=None, **kwargs):
super(AssertTypeLayer, self).__init__(**kwargs)
self.assert_type = assert_type
def call(self, inputs):
assert inputs.dtype.name == self.assert_type, (
'Input tensor has type %s which does not match assert type %s' %
(inputs.dtype.name, self.assert_type))
return inputs + 1.
for dtype in ('float16', 'float32', 'float64'):
model = testing_utils.get_model_from_layers([AssertTypeLayer(dtype)],
input_shape=(10,),
input_dtype=dtype)
model.compile('sgd', 'mse',
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones((10, 10), dtype=dtype)
y = np.ones((10, 10), dtype=dtype)
model.fit(x, y)
model.test_on_batch(x, y)
model(x)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_subclassed_model_with_training_arg(self):
class LayerWithTrainingArg(keras.layers.Layer):
def call(self, inputs, training=None):
self.training = training
return inputs
class ModelWithTrainingArg(keras.Model):
def __init__(self):
super(ModelWithTrainingArg, self).__init__()
self.l1 = LayerWithTrainingArg()
def call(self, inputs, training=None):
self.training = training
inputs = self.l1(inputs, training=training)
return inputs
x = np.zeros((1, 2))
model = ModelWithTrainingArg()
model.compile(
loss='mse',
optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
model.fit(x, x, epochs=1)
if testing_utils.should_run_eagerly():
expected_training_arg = True
else:
expected_training_arg = keras.backend.symbolic_learning_phase()
self.assertEqual(model.training, expected_training_arg)
self.assertEqual(model.l1.training, expected_training_arg)
@keras_parameterized.run_all_keras_modes
def test_error_when_model_is_not_compiled(self):
inputs = keras.Input(shape=(1,))
outputs = keras.layers.Dense(1)(inputs)
model = keras.Model(inputs, outputs)
with self.assertRaisesRegex(RuntimeError, 'must compile your model'):
model.fit(np.ones((1, 1)), np.ones((1, 1)))
class MyModel(keras.Model):
def call(self, x):
self.add_loss(math_ops.reduce_sum(x))
return x
model = MyModel()
with self.assertRaisesRegex(RuntimeError, 'must compile your model'):
model.fit(np.random.random((32, 1)), epochs=2)
class TestExceptionsAndWarnings(keras_parameterized.TestCase):
@keras_parameterized.run_all_keras_modes
def test_invalid_loss(self):
num_classes = 5
train_samples = 1000
test_samples = 1000
input_dim = 5
model = testing_utils.get_small_sequential_mlp(
num_hidden=10, num_classes=num_classes, input_dim=input_dim)
optimizer = RMSPropOptimizer(learning_rate=0.001)
model.compile(optimizer, loss='categorical_crossentropy')
np.random.seed(1337)
(x_train, y_train), (_, _) = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=test_samples,
input_shape=(input_dim,),
num_classes=num_classes)
with self.assertRaises(ValueError):
model.fit(x_train, np.concatenate([y_train, y_train], axis=-1))
if not context.executing_eagerly():
# TODO(psv): Investigate these use cases in eager mode.
with self.assertRaises(ValueError):
model.fit(x_train, y_train)
with self.assertRaises(ValueError):
model.compile(optimizer, loss=None,
run_eagerly=testing_utils.should_run_eagerly())
@keras_parameterized.run_all_keras_modes
def test_compile_warning_for_loss_missing_output(self):
with self.cached_session():
inp = keras.layers.Input(shape=(16,), name='input_a')
out_1 = keras.layers.Dense(8, name='dense_1')(inp)
out_2 = keras.layers.Dense(3, activation='softmax', name='dense_2')(out_1)
model = keras.models.Model(inputs=[inp], outputs=[out_1, out_2])
optimizer = RMSPropOptimizer(learning_rate=0.001)
with test.mock.patch.object(logging, 'warning') as mock_log:
model.compile(
optimizer,
loss={
'dense_2': 'categorical_crossentropy',
},
metrics={
'dense_2': 'categorical_accuracy',
'dense_1': metrics_module.CategoricalAccuracy(),
},
run_eagerly=testing_utils.should_run_eagerly())
msg = ('Output dense_1 missing from loss dictionary. We assume this '
'was done on purpose. The fit and evaluate APIs will not be '
'expecting any data to be passed to dense_1.')
self.assertRegexpMatches(str(mock_log.call_args), msg)
class LossWeightingTest(keras_parameterized.TestCase):
@keras_parameterized.run_all_keras_modes
def test_class_weights(self):
num_classes = 5
batch_size = 5
epochs = 10
weighted_class = 3
weight = 10.
train_samples = 1000
test_samples = 1000
input_dim = 5
learning_rate = 0.001
model = testing_utils.get_small_sequential_mlp(
num_hidden=10, num_classes=num_classes, input_dim=input_dim)
model.compile(
loss='categorical_crossentropy',
metrics=['acc', metrics_module.CategoricalAccuracy()],
weighted_metrics=['mae', metrics_module.CategoricalAccuracy()],
optimizer=RMSPropOptimizer(learning_rate=learning_rate),
run_eagerly=testing_utils.should_run_eagerly())
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=test_samples,
input_shape=(input_dim,),
num_classes=num_classes)
int_y_test = y_test.copy()
int_y_train = y_train.copy()
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
test_ids = np.where(int_y_test == np.array(weighted_class))[0]
class_weight = dict([(i, 1.) for i in range(num_classes)])
class_weight[weighted_class] = weight
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=epochs // 3,
verbose=0,
class_weight=class_weight,
validation_data=(x_train, y_train))
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=epochs // 2,
verbose=0,
class_weight=class_weight)
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=epochs // 2,
verbose=0,
class_weight=class_weight,
validation_split=0.1)
model.train_on_batch(
x_train[:batch_size], y_train[:batch_size], class_weight=class_weight)
ref_score = model.evaluate(x_test, y_test, verbose=0)
score = model.evaluate(
x_test[test_ids, :], y_test[test_ids, :], verbose=0)
self.assertLess(score[0], ref_score[0])
@keras_parameterized.run_all_keras_modes
def test_sample_weights(self):
num_classes = 5
batch_size = 5
epochs = 10
weighted_class = 3
weight = 10.
train_samples = 1000
test_samples = 1000
input_dim = 5
learning_rate = 0.001
model = testing_utils.get_small_sequential_mlp(
num_hidden=10, num_classes=num_classes, input_dim=input_dim)
model.compile(
RMSPropOptimizer(learning_rate=learning_rate),
metrics=['acc', metrics_module.CategoricalAccuracy()],
weighted_metrics=['mae', metrics_module.CategoricalAccuracy()],
loss='categorical_crossentropy',
run_eagerly=testing_utils.should_run_eagerly())
np.random.seed(43)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=test_samples,
input_shape=(input_dim,),
num_classes=num_classes)
int_y_test = y_test.copy()
int_y_train = y_train.copy()
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
test_ids = np.where(int_y_test == np.array(weighted_class))[0]
sample_weight = np.ones((y_train.shape[0]))
sample_weight[int_y_train == weighted_class] = weight
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=epochs // 3,
verbose=0,
sample_weight=sample_weight)
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=epochs // 3,
verbose=0,
sample_weight=sample_weight,
validation_split=0.1)
model.train_on_batch(
x_train[:batch_size],
y_train[:batch_size],
sample_weight=sample_weight[:batch_size])
model.test_on_batch(
x_train[:batch_size],
y_train[:batch_size],
sample_weight=sample_weight[:batch_size])
ref_score = model.evaluate(
x_test, y_test, verbose=0, sample_weight=sample_weight)
score = model.evaluate(
x_test[test_ids, :],
y_test[test_ids, :],
verbose=0,
sample_weight=sample_weight[test_ids])
self.assertLess(score[0], ref_score[0])
@keras_parameterized.run_all_keras_modes
def test_temporal_sample_weights(self):
num_classes = 5
batch_size = 5
epochs = 10
weighted_class = 3
weight = 10.
train_samples = 1000
test_samples = 1000
input_dim = 5
timesteps = 3
learning_rate = 0.001
with self.cached_session():
model = keras.models.Sequential()
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(num_classes),
input_shape=(timesteps, input_dim)))
model.add(keras.layers.Activation('softmax'))
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=test_samples,
input_shape=(input_dim,),
num_classes=num_classes)
int_y_test = y_test.copy()
int_y_train = y_train.copy()
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
test_ids = np.where(int_y_test == np.array(weighted_class))[0]
sample_weight = np.ones((y_train.shape[0]))
sample_weight[int_y_train == weighted_class] = weight
temporal_x_train = np.reshape(x_train, (len(x_train), 1,
x_train.shape[1]))
temporal_x_train = np.repeat(temporal_x_train, timesteps, axis=1)
temporal_x_test = np.reshape(x_test, (len(x_test), 1, x_test.shape[1]))
temporal_x_test = np.repeat(temporal_x_test, timesteps, axis=1)
temporal_y_train = np.reshape(y_train, (len(y_train), 1,
y_train.shape[1]))
temporal_y_train = np.repeat(temporal_y_train, timesteps, axis=1)
temporal_y_test = np.reshape(y_test, (len(y_test), 1, y_test.shape[1]))
temporal_y_test = np.repeat(temporal_y_test, timesteps, axis=1)
temporal_sample_weight = np.reshape(sample_weight, (len(sample_weight),
1))
temporal_sample_weight = np.repeat(
temporal_sample_weight, timesteps, axis=1)
model.compile(
RMSPropOptimizer(learning_rate=learning_rate),
loss='categorical_crossentropy',
metrics=['acc', metrics_module.CategoricalAccuracy()],
weighted_metrics=['mae', metrics_module.CategoricalAccuracy()],
sample_weight_mode='temporal',
run_eagerly=testing_utils.should_run_eagerly())
model.fit(
temporal_x_train,
temporal_y_train,
batch_size=batch_size,
epochs=epochs // 3,
verbose=0,
sample_weight=temporal_sample_weight)
model.fit(
temporal_x_train,
temporal_y_train,
batch_size=batch_size,
epochs=epochs // 3,
verbose=0,
sample_weight=temporal_sample_weight,
validation_split=0.1)
model.train_on_batch(
temporal_x_train[:batch_size],
temporal_y_train[:batch_size],
sample_weight=temporal_sample_weight[:batch_size])
model.test_on_batch(
temporal_x_train[:batch_size],
temporal_y_train[:batch_size],
sample_weight=temporal_sample_weight[:batch_size])
ref_score = model.evaluate(temporal_x_test, temporal_y_test, verbose=0)
if not context.executing_eagerly():
score = model.evaluate(
temporal_x_test[test_ids], temporal_y_test[test_ids], verbose=0)
self.assertLess(score[0], ref_score[0])
@keras_parameterized.run_all_keras_modes
@keras_parameterized.run_with_all_model_types(exclude_models='sequential')
def test_fit_with_incorrect_weights(self):
input_a = keras.layers.Input(shape=(3,), name='input_a')
input_b = keras.layers.Input(shape=(3,), name='input_b')
dense = keras.layers.Dense(2, name='output_1')
dropout = keras.layers.Dropout(0.5, name='output_2')
branch_a = [input_a, dense]
branch_b = [input_b, dense, dropout]
model = testing_utils.get_multi_io_model(branch_a, branch_b)
model.compile(
optimizer='adam',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
x = np.random.random((10, 3))
y = np.random.random((10, 2))
with self.assertRaisesRegexp(
ValueError,
r'Unknown entries in sample_weight dictionary: \[\'unknown\'\]. '
r'Only expected following keys: \[\'output_1\', \'output_2\'\]'):
model.fit([x, x], [y, y],
epochs=1,
sample_weight={'unknown': 'something'})
with self.assertRaisesRegexp(
ValueError,
r'Unknown entries in class_weight dictionary: \[\'unknown\'\]. '
r'Only expected following keys: \[\'output_1\', \'output_2\'\]'):
model.fit([x, x], [y, y], epochs=1, class_weight={'unknown': 'something'})
@keras_parameterized.run_all_keras_modes
def test_class_weight_invalid_use_case(self):
num_classes = 5
train_samples = 1000
test_samples = 1000
input_dim = 5
timesteps = 3
learning_rate = 0.001
with self.cached_session():
model = keras.models.Sequential()
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(num_classes),
input_shape=(timesteps, input_dim)))
model.add(keras.layers.Activation('softmax'))
optimizer = RMSPropOptimizer(learning_rate=learning_rate)
model.compile(optimizer, loss='binary_crossentropy',
run_eagerly=testing_utils.should_run_eagerly())
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=test_samples,
input_shape=(input_dim,),
num_classes=num_classes)
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
class_weight = dict([(i, 1.) for i in range(num_classes)])
del class_weight[1]
with self.assertRaises(ValueError):
model.fit(x_train, y_train,
epochs=0, verbose=0, class_weight=class_weight)
with self.assertRaises(ValueError):
model.compile(
optimizer, loss='binary_crossentropy', sample_weight_mode=[],
run_eagerly=testing_utils.should_run_eagerly())
# Build multi-output model
x = keras.Input((3,))
y1 = keras.layers.Dense(4, name='1')(x)
y2 = keras.layers.Dense(4, name='2')(x)
model = keras.models.Model(x, [y1, y2])
model.compile(optimizer, loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
x_np = np.random.random((10, 3))
y_np = np.random.random((10, 4))
w_np = np.random.random((10,))
# This will work
model.fit(x_np, [y_np, y_np], epochs=1,
sample_weight={'1': w_np})
# These will not
with self.assertRaises(ValueError):
model.fit(x_np, [y_np, y_np], epochs=1,
sample_weight=[w_np])
with self.assertRaises(TypeError):
model.fit(x_np, [y_np, y_np], epochs=1,
sample_weight=w_np)
with self.assertRaises(ValueError):
bad_w_np = np.random.random((11,))
model.fit(x_np, [y_np, y_np], epochs=1,
sample_weight={'1': bad_w_np})
with self.assertRaises(ValueError):
bad_w_np = np.random.random((10, 2))
model.fit(x_np, [y_np, y_np], epochs=1,
sample_weight={'1': bad_w_np})
with self.assertRaises(ValueError):
bad_w_np = np.random.random((10, 2, 2))
model.fit(x_np, [y_np, y_np], epochs=1,
sample_weight={'1': bad_w_np})
@keras_parameterized.run_all_keras_modes
def test_default_sample_weight(self):
"""Verifies that fit works without having to set sample_weight."""
num_classes = 5
input_dim = 5
timesteps = 3
learning_rate = 0.001
with self.cached_session():
model = keras.models.Sequential()
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(num_classes),
input_shape=(timesteps, input_dim)))
x = np.random.random((10, timesteps, input_dim))
y = np.random.random((10, timesteps, num_classes))
optimizer = RMSPropOptimizer(learning_rate=learning_rate)
# sample_weight_mode is a list and mode value is None
model.compile(optimizer, loss='mse', sample_weight_mode=[None],
run_eagerly=testing_utils.should_run_eagerly())
model.fit(x, y, epochs=1, batch_size=10)
# sample_weight_mode is a list and mode value is `temporal`
model.compile(optimizer, loss='mse', sample_weight_mode=['temporal'],
run_eagerly=testing_utils.should_run_eagerly())
model.fit(x, y, epochs=1, batch_size=10)
# sample_weight_mode is a dict and mode value is None
model.compile(
optimizer, loss='mse', sample_weight_mode={'time_distributed': None},
run_eagerly=testing_utils.should_run_eagerly())
model.fit(x, y, epochs=1, batch_size=10)
# sample_weight_mode is a dict and mode value is `temporal`
model.compile(
optimizer,
loss='mse',
sample_weight_mode={'time_distributed': 'temporal'},
run_eagerly=testing_utils.should_run_eagerly())
model.fit(x, y, epochs=1, batch_size=10)
      # sample_weight_mode is not a list/dict and mode value is None
model.compile(optimizer, loss='mse', sample_weight_mode=None,
run_eagerly=testing_utils.should_run_eagerly())
model.fit(x, y, epochs=1, batch_size=10)
      # sample_weight_mode is not a list/dict and mode value is `temporal`
model.compile(optimizer, loss='mse', sample_weight_mode='temporal',
run_eagerly=testing_utils.should_run_eagerly())
model.fit(x, y, epochs=1, batch_size=10)
def test_sample_weight_tensor(self):
"""Tests that sample weight may be defined as a tensor in the graph."""
with context.graph_mode():
# Create a simple pass-through model
input_layer = keras.layers.Input(shape=1, name='input_layer')
model = keras.Model(inputs=input_layer, outputs=input_layer)
model.compile(
loss='mean_absolute_error',
optimizer='adam')
# Prepare sample weights iterator tensor
sample_weights = array_ops.constant(
[[0, .4, 1, 1], [2, .4, .3, 1]])
dataset = dataset_ops.Dataset.from_tensor_slices(sample_weights)
sample_weights = dataset_ops.make_one_shot_iterator(dataset).get_next()
sample_weights = training_utils.standardize_sample_weights(
sample_weights, model.output_names)
# Update model loss with sample weight tensor.
model._compile_weights_loss_and_weighted_metrics(sample_weights)
feeds = {'input_layer:0': [[0], [0], [0], [0]],
'input_layer_target:0': [[1], [1], [1], [1]]}
with self.cached_session() as sess:
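        # Each run of total_loss advances the sample-weight iterator, so the
        # first evaluation uses weights [0, .4, 1, 1] and the second
        # [2, .4, .3, 1].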
self.assertAllClose(
(.4 + 1 + 1) / 4, sess.run(model.total_loss, feed_dict=feeds))
self.assertAllClose(
            (2 + .4 + .3 + 1) / 4, sess.run(model.total_loss, feed_dict=feeds))
def test_prepare_sample_weights(self):
# pylint:disable=anomalous-backslash-in-string
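    # _prepare_sample_weights should check that the number of weights matches
    # the number of outputs and that each weight's rank matches the compiled
    # sample_weight_mode ('temporal' vs. 'samplewise').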
input_layer = keras.layers.Input(shape=1, name='input_layer')
model = keras.Model(inputs=input_layer, outputs=[input_layer, input_layer])
sample_weights = array_ops.constant([0, .4, 1, 1])
temporal_weights = array_ops.constant([[1, 2], [3, 4], [5, 6]])
model.compile(
loss='mean_absolute_error',
optimizer='adam',
sample_weight_mode=None)
with self.assertRaises(AssertionError):
model._prepare_sample_weights([sample_weights, sample_weights])
model.compile(loss='mean_absolute_error', optimizer='adam',
sample_weight_mode='temporal')
model._prepare_sample_weights([temporal_weights, temporal_weights])
with self.assertRaisesRegexp(ValueError, 'Expected shape \[None, None\]'):
model._prepare_sample_weights([sample_weights, sample_weights])
with self.assertRaisesRegexp(ValueError,
'sample weights must have same length as the '
'number of outputs'):
model._prepare_sample_weights([temporal_weights])
model.compile(loss='mean_absolute_error', optimizer='adam',
sample_weight_mode='samplewise')
model._prepare_sample_weights([sample_weights, sample_weights])
with self.assertRaisesRegexp(ValueError, 'Expected shape \[None\]'):
model._prepare_sample_weights([temporal_weights, temporal_weights])
# pylint:enable=anomalous-backslash-in-string
@keras_parameterized.run_all_keras_modes
class MaskingTest(keras_parameterized.TestCase):
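  """Tests that masked values do not contribute to the loss."""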
def _get_model(self, input_shape=None):
layers = [
keras.layers.Masking(mask_value=0),
keras.layers.TimeDistributed(
keras.layers.Dense(1, kernel_initializer='one'))
]
model = testing_utils.get_model_from_layers(layers, input_shape)
model.compile(
loss='mse',
optimizer=RMSPropOptimizer(learning_rate=0.001),
run_eagerly=testing_utils.should_run_eagerly())
return model
@keras_parameterized.run_with_all_model_types
def test_masking(self):
model = self._get_model(input_shape=(2, 1))
x = np.array([[[1], [1]], [[0], [0]]])
y = np.array([[[1], [1]], [[1], [1]]])
loss = model.train_on_batch(x, y)
self.assertEqual(loss, 0)
@keras_parameterized.run_with_all_model_types(exclude_models='functional')
def test_masking_deferred(self):
model = self._get_model()
x = np.array([[[1], [1]], [[0], [0]]])
y = np.array([[[1], [1]], [[1], [1]]])
loss = model.train_on_batch(x, y)
self.assertEqual(loss, 0)
def test_mask_argument_in_layer(self):
# Test that the mask argument gets correctly passed to a layer in the
# functional API.
class CustomMaskedLayer(keras.layers.Layer):
def __init__(self):
super(CustomMaskedLayer, self).__init__()
self.supports_masking = True
def call(self, inputs, mask=None):
assert mask is not None
return inputs
def compute_output_shape(self, input_shape):
return input_shape
x = np.random.random((5, 3))
inputs = keras.layers.Input((3,))
masked = keras.layers.Masking(mask_value=0)(inputs)
outputs = CustomMaskedLayer()(masked)
model = keras.Model(inputs, outputs)
model.compile(
loss='mse',
optimizer=RMSPropOptimizer(learning_rate=0.001),
run_eagerly=testing_utils.should_run_eagerly())
y = np.random.random((5, 3))
model.train_on_batch(x, y)
class TestDynamicTrainability(keras_parameterized.TestCase):
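  """Tests toggling the `trainable` flag on layers, models, and nested models."""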
def test_trainable_warning(self):
with self.cached_session():
x = np.random.random((5, 3))
y = np.random.random((5, 2))
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_dim=3))
model.trainable = False
model.compile('rmsprop', 'mse')
model.trainable = True
model.train_on_batch(x, y)
self.assertRaises(Warning)
def test_trainable_argument(self):
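    # A layer built with trainable=False should leave predictions unchanged by
    # train_on_batch, both standalone and when nested inside another model.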
with self.cached_session():
x = np.random.random((5, 3))
y = np.random.random((5, 2))
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_dim=3, trainable=False))
model.compile('rmsprop', 'mse')
out = model.predict(x)
model.train_on_batch(x, y)
out_2 = model.predict(x)
self.assertAllClose(out, out_2)
# test with nesting
inputs = keras.layers.Input(shape=(3,))
output = model(inputs)
model = keras.models.Model(inputs, output)
model.compile('rmsprop', 'mse')
out = model.predict(x)
model.train_on_batch(x, y)
out_2 = model.predict(x)
self.assertAllClose(out, out_2)
def test_layer_trainability_switch(self):
with self.cached_session():
# with constructor argument, in Sequential
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, trainable=False, input_dim=1))
self.assertListEqual(model.trainable_weights, [])
# by setting the `trainable` argument, in Sequential
model = keras.models.Sequential()
layer = keras.layers.Dense(2, input_dim=1)
model.add(layer)
self.assertListEqual(model.trainable_weights, layer.trainable_weights)
layer.trainable = False
self.assertListEqual(model.trainable_weights, [])
# with constructor argument, in Model
x = keras.layers.Input(shape=(1,))
y = keras.layers.Dense(2, trainable=False)(x)
model = keras.models.Model(x, y)
self.assertListEqual(model.trainable_weights, [])
# by setting the `trainable` argument, in Model
x = keras.layers.Input(shape=(1,))
layer = keras.layers.Dense(2)
y = layer(x)
model = keras.models.Model(x, y)
self.assertListEqual(model.trainable_weights, layer.trainable_weights)
layer.trainable = False
self.assertListEqual(model.trainable_weights, [])
def test_model_trainability_switch(self):
with self.cached_session():
# a non-trainable model has no trainable weights
x = keras.layers.Input(shape=(1,))
y = keras.layers.Dense(2)(x)
model = keras.models.Model(x, y)
model.trainable = False
self.assertListEqual(model.trainable_weights, [])
# same for Sequential
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_dim=1))
model.trainable = False
self.assertListEqual(model.trainable_weights, [])
def test_nested_model_trainability(self):
with self.cached_session():
# a Sequential inside a Model
inner_model = keras.models.Sequential()
inner_model.add(keras.layers.Dense(2, input_dim=1))
x = keras.layers.Input(shape=(1,))
y = inner_model(x)
outer_model = keras.models.Model(x, y)
self.assertListEqual(outer_model.trainable_weights,
inner_model.trainable_weights)
inner_model.trainable = False
self.assertListEqual(outer_model.trainable_weights, [])
inner_model.trainable = True
inner_model.layers[-1].trainable = False
self.assertListEqual(outer_model.trainable_weights, [])
# a Sequential inside a Sequential
inner_model = keras.models.Sequential()
inner_model.add(keras.layers.Dense(2, input_dim=1))
outer_model = keras.models.Sequential()
outer_model.add(inner_model)
self.assertListEqual(outer_model.trainable_weights,
inner_model.trainable_weights)
inner_model.trainable = False
self.assertListEqual(outer_model.trainable_weights, [])
inner_model.trainable = True
inner_model.layers[-1].trainable = False
self.assertListEqual(outer_model.trainable_weights, [])
# a Model inside a Model
x = keras.layers.Input(shape=(1,))
y = keras.layers.Dense(2)(x)
inner_model = keras.models.Model(x, y)
x = keras.layers.Input(shape=(1,))
y = inner_model(x)
outer_model = keras.models.Model(x, y)
self.assertListEqual(outer_model.trainable_weights,
inner_model.trainable_weights)
inner_model.trainable = False
self.assertListEqual(outer_model.trainable_weights, [])
inner_model.trainable = True
inner_model.layers[-1].trainable = False
self.assertListEqual(outer_model.trainable_weights, [])
# a Model inside a Sequential
x = keras.layers.Input(shape=(1,))
y = keras.layers.Dense(2)(x)
inner_model = keras.models.Model(x, y)
outer_model = keras.models.Sequential()
outer_model.add(inner_model)
self.assertListEqual(outer_model.trainable_weights,
inner_model.trainable_weights)
inner_model.trainable = False
self.assertListEqual(outer_model.trainable_weights, [])
inner_model.trainable = True
inner_model.layers[-1].trainable = False
self.assertListEqual(outer_model.trainable_weights, [])
class TestTrainingWithDataTensors(keras_parameterized.TestCase):
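  """Tests fit/evaluate/predict with symbolic tensors as inputs and targets."""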
@keras_parameterized.run_all_keras_modes
def test_training_and_eval_methods_on_symbolic_tensors_single_io(self):
# TODO(kaftan) Test seems to not work, file ticket
if context.executing_eagerly():
self.skipTest('Skipping eager execution.')
x = keras.layers.Input(shape=(3,), name='input')
y = keras.layers.Dense(4, name='dense')(x)
model = keras.Model(x, y)
optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(
optimizer,
loss,
metrics=['mae', metrics_module.CategoricalAccuracy()],
run_eagerly=testing_utils.should_run_eagerly())
inputs = keras.backend.zeros(shape=(10, 3))
targets = keras.backend.zeros(shape=(10, 4))
model.fit(inputs, targets, epochs=1, steps_per_epoch=2, verbose=0)
model.evaluate(inputs, targets, steps=2, verbose=0)
model.predict(inputs, steps=2)
model.train_on_batch(inputs, targets)
model.test_on_batch(inputs, targets)
model.fit(inputs, targets,
epochs=1, steps_per_epoch=2, verbose=0,
validation_data=(inputs, targets), validation_steps=2)
# Test with dynamic shape
inputs = array_ops.placeholder_with_default(
np.zeros((2, 3)), shape=tensor_shape.TensorShape([None, 3]))
targets = array_ops.placeholder_with_default(
np.zeros((2, 4)), shape=tensor_shape.TensorShape([None, 4]))
self.assertEqual(inputs.shape.dims[0].value, None)
model.fit(inputs, targets, epochs=1, steps_per_epoch=2, verbose=0)
model.evaluate(inputs, targets, steps=2, verbose=0)
model.predict(inputs, steps=2)
model.train_on_batch(inputs, targets)
model.test_on_batch(inputs, targets)
model.fit(inputs, targets,
epochs=1, steps_per_epoch=2, verbose=0,
validation_data=(inputs, targets), validation_steps=2)
@keras_parameterized.run_all_keras_modes
def test_training_and_eval_methods_on_symbolic_tensors_multi_io(self):
# TODO(kaftan) Test seems to not work, file ticket
if context.executing_eagerly():
self.skipTest('Skipping eager execution.')
a = keras.layers.Input(shape=(3,), name='input_a')
b = keras.layers.Input(shape=(3,), name='input_b')
dense = keras.layers.Dense(4, name='dense')
c = dense(a)
d = dense(b)
e = keras.layers.Dropout(0.5, name='dropout')(c)
model = keras.models.Model([a, b], [d, e])
optimizer = 'rmsprop'
loss = 'mse'
loss_weights = [1., 0.5]
model.compile(
optimizer,
loss,
metrics=['mae', metrics_module.CategoricalAccuracy()],
loss_weights=loss_weights,
run_eagerly=testing_utils.should_run_eagerly())
input_a_tf = keras.backend.zeros(shape=(10, 3))
input_b_tf = keras.backend.zeros(shape=(10, 3))
output_d_tf = keras.backend.zeros(shape=(10, 4))
output_e_tf = keras.backend.zeros(shape=(10, 4))
model.fit(
[input_a_tf, input_b_tf], [output_d_tf, output_e_tf],
epochs=1,
steps_per_epoch=2,
verbose=0)
with self.assertRaisesRegexp(ValueError,
'should specify the `steps_per_epoch`'):
model.fit(
[input_a_tf, input_b_tf], [output_d_tf, output_e_tf],
epochs=1,
batch_size=5,
verbose=0)
model.train_on_batch([input_a_tf, input_b_tf], [output_d_tf, output_e_tf])
# Test with dictionary inputs
model.fit(
{'input_a': input_a_tf,
'input_b': input_b_tf},
{'dense': output_d_tf,
'dropout': output_e_tf},
epochs=1,
steps_per_epoch=2,
verbose=0)
model.fit(
{'input_a': input_a_tf,
'input_b': input_b_tf},
{'dense': output_d_tf,
'dropout': output_e_tf},
validation_data=({'input_a': input_a_tf,
'input_b': input_b_tf},
{'dense': output_d_tf,
'dropout': output_e_tf}),
epochs=1,
steps_per_epoch=2,
validation_steps=2,
verbose=0)
model.train_on_batch(
{'input_a': input_a_tf,
'input_b': input_b_tf},
{'dense': output_d_tf,
'dropout': output_e_tf})
# Test with validation data
model.fit(
[input_a_tf, input_b_tf], [output_d_tf, output_e_tf],
validation_data=([input_a_tf, input_b_tf],
[output_d_tf, output_e_tf]),
epochs=1,
steps_per_epoch=2,
validation_steps=2,
verbose=0)
# Test with validation split
with self.assertRaisesRegexp(ValueError,
'you cannot use `validation_split`'):
model.fit(
[input_a_tf, input_b_tf], [output_d_tf, output_e_tf],
epochs=2,
steps_per_epoch=2,
verbose=0,
validation_split=0.2,
validation_steps=2)
# Test evaluation / prediction methods
model.evaluate([input_a_tf, input_b_tf], [output_d_tf, output_e_tf],
steps=2, verbose=0)
model.predict([input_a_tf, input_b_tf], steps=2)
model.test_on_batch([input_a_tf, input_b_tf], [output_d_tf, output_e_tf])
@tf_test_util.run_deprecated_v1
def test_model_with_input_feed_tensor(self):
"""We test building a model with a TF variable as input.
We should be able to call fit, evaluate, predict,
by only passing them data for the placeholder inputs
in the model.
"""
with self.cached_session():
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_a_np = np.random.random((10, 4))
output_b_np = np.random.random((10, 3))
input_v = keras.backend.variables_module.Variable(
input_a_np, dtype='float32')
self.evaluate(variables_lib.variables_initializer([input_v]))
a = keras.Input(tensor=input_v)
b = keras.Input(shape=(3,), name='input_b')
a_2 = keras.layers.Dense(4, name='dense_1')(a)
dp = keras.layers.Dropout(0.5, name='dropout')
b_2 = dp(b)
model = keras.models.Model([a, b], [a_2, b_2])
model.summary()
optimizer = 'rmsprop'
loss = 'mse'
loss_weights = [1., 0.5]
model.compile(optimizer, loss, metrics=['mean_squared_error'],
loss_weights=loss_weights,
sample_weight_mode=None)
# test train_on_batch
out = model.train_on_batch(input_b_np,
[output_a_np, output_b_np])
out = model.train_on_batch({'input_b': input_b_np},
[output_a_np, output_b_np])
out = model.test_on_batch({'input_b': input_b_np},
[output_a_np, output_b_np])
out = model.predict_on_batch({'input_b': input_b_np})
# test fit
out = model.fit({'input_b': input_b_np},
[output_a_np, output_b_np], epochs=1, batch_size=10)
out = model.fit(input_b_np,
[output_a_np, output_b_np], epochs=1, batch_size=10)
# test evaluate
out = model.evaluate({'input_b': input_b_np},
[output_a_np, output_b_np], batch_size=10)
out = model.evaluate(input_b_np,
[output_a_np, output_b_np], batch_size=10)
# test predict
out = model.predict({'input_b': input_b_np}, batch_size=10)
out = model.predict(input_b_np, batch_size=10)
self.assertEqual(len(out), 2)
      # Now test a model whose only input is the fed variable,
      # i.e. we don't pass any data to fit the model.
self.evaluate(variables_lib.variables_initializer([input_v]))
a = keras.Input(tensor=input_v)
a_2 = keras.layers.Dense(4, name='dense_1')(a)
a_2 = keras.layers.Dropout(0.5, name='dropout')(a_2)
model = keras.models.Model(a, a_2)
model.summary()
optimizer = 'rmsprop'
loss = 'mse'
model.compile(optimizer, loss, metrics=['mean_squared_error'])
# test train_on_batch
out = model.train_on_batch(None,
output_a_np)
out = model.train_on_batch(None,
output_a_np)
out = model.test_on_batch(None,
output_a_np)
out = model.predict_on_batch(None)
out = model.train_on_batch([],
output_a_np)
out = model.train_on_batch({},
output_a_np)
# test fit
_ = model.fit(None, output_a_np, epochs=1, steps_per_epoch=3)
_ = model.fit(None, output_a_np, epochs=1, steps_per_epoch=3)
# test evaluate
_ = model.evaluate(None, output_a_np, steps=3)
_ = model.evaluate(None, output_a_np, steps=3)
# test predict
out = model.predict(None, steps=3)
out = model.predict(None, steps=3)
self.assertEqual(out.shape, (10 * 3, 4))
      # Same, but without a learning phase (no dropout layer);
      # again, we don't pass any data to fit the model.
self.evaluate(variables_lib.variables_initializer([input_v]))
a = keras.Input(tensor=input_v)
a_2 = keras.layers.Dense(4, name='dense_1')(a)
model = keras.models.Model(a, a_2)
model.summary()
optimizer = 'rmsprop'
loss = 'mse'
model.compile(optimizer, loss, metrics=['mean_squared_error'])
# test train_on_batch
out = model.train_on_batch(None,
output_a_np)
out = model.train_on_batch(None,
output_a_np)
out = model.test_on_batch(None,
output_a_np)
out = model.predict_on_batch(None)
out = model.train_on_batch([],
output_a_np)
out = model.train_on_batch({},
output_a_np)
# test fit
_ = model.fit(None, output_a_np, epochs=1, steps_per_epoch=10)
_ = model.fit(None, output_a_np, epochs=1, steps_per_epoch=10)
# test evaluate
_ = model.evaluate(None, output_a_np, steps=10)
_ = model.evaluate(None, output_a_np, steps=10)
# test predict
out = model.predict(None, steps=3)
out = model.predict(None, steps=3)
self.assertEqual(out.shape, (10 * 3, 4))
def test_model_with_partial_loss(self):
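    # Only one of the two outputs has a loss; training and evaluation should
    # accept targets for just that output.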
with self.cached_session():
a = keras.Input(shape=(3,), name='input_a')
a_2 = keras.layers.Dense(4, name='dense_1')(a)
dp = keras.layers.Dropout(0.5, name='dropout')
a_3 = dp(a_2)
model = keras.models.Model(a, [a_2, a_3])
optimizer = 'rmsprop'
loss = {'dropout': 'mse'}
model.compile(optimizer, loss, metrics=['mae'])
input_a_np = np.random.random((10, 3))
output_a_np = np.random.random((10, 4))
# test train_on_batch
_ = model.train_on_batch(input_a_np, output_a_np)
_ = model.test_on_batch(input_a_np, output_a_np)
# fit
_ = model.fit(input_a_np, [output_a_np])
# evaluate
_ = model.evaluate(input_a_np, [output_a_np])
# Same without dropout.
a = keras.Input(shape=(3,), name='input_a')
a_2 = keras.layers.Dense(4, name='dense_1')(a)
a_3 = keras.layers.Dense(4, name='dense_2')(a_2)
model = keras.models.Model(a, [a_2, a_3])
optimizer = 'rmsprop'
loss = {'dense_2': 'mse'}
model.compile(optimizer, loss, metrics={'dense_1': 'mae'})
# test train_on_batch
_ = model.train_on_batch(input_a_np, output_a_np)
_ = model.test_on_batch(input_a_np, output_a_np)
# fit
_ = model.fit(input_a_np, [output_a_np])
# evaluate
_ = model.evaluate(input_a_np, [output_a_np])
@tf_test_util.run_deprecated_v1
def test_model_with_external_loss(self):
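    # Models compiled with loss=None should train on regularization losses and
    # losses added via add_loss, including models fed entirely from tensors
    # with no external data.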
with self.cached_session():
# None loss, only regularization loss.
a = keras.Input(shape=(3,), name='input_a')
a_2 = keras.layers.Dense(4, name='dense_1',
kernel_regularizer='l1',
bias_regularizer='l2')(a)
dp = keras.layers.Dropout(0.5, name='dropout')
a_3 = dp(a_2)
model = keras.models.Model(a, [a_2, a_3])
optimizer = 'rmsprop'
loss = None
model.compile(optimizer, loss, metrics=['mae'])
input_a_np = np.random.random((10, 3))
# test train_on_batch
out = model.train_on_batch(input_a_np, None)
out = model.test_on_batch(input_a_np, None)
# fit
out = model.fit(input_a_np, None)
# evaluate
out = model.evaluate(input_a_np, None)
# No dropout, external loss.
a = keras.Input(shape=(3,), name='input_a')
a_2 = keras.layers.Dense(4, name='dense_1')(a)
a_3 = keras.layers.Dense(4, name='dense_2')(a)
model = keras.models.Model(a, [a_2, a_3])
model.add_loss(keras.backend.mean(a_3 + a_2))
optimizer = 'rmsprop'
loss = None
model.compile(optimizer, loss, metrics=['mae'])
# test train_on_batch
out = model.train_on_batch(input_a_np, None)
out = model.test_on_batch(input_a_np, None)
# fit
out = model.fit(input_a_np, None)
# evaluate
out = model.evaluate(input_a_np, None)
# Test model with no external data at all.
input_v = keras.backend.variables_module.Variable(
input_a_np, dtype='float32')
self.evaluate(variables_lib.variables_initializer([input_v]))
a = keras.Input(tensor=input_v)
a_2 = keras.layers.Dense(4, name='dense_1')(a)
a_2 = keras.layers.Dropout(0.5, name='dropout')(a_2)
model = keras.models.Model(a, a_2)
model.add_loss(keras.backend.mean(a_2))
model.compile(optimizer='rmsprop',
loss=None,
metrics=['mean_squared_error'])
# test train_on_batch
out = model.train_on_batch(None, None)
out = model.test_on_batch(None, None)
out = model.predict_on_batch(None)
# test fit
with self.assertRaises(ValueError):
out = model.fit(None, None, epochs=1, batch_size=10)
out = model.fit(None, None, epochs=1, steps_per_epoch=1)
# test fit with validation data
with self.assertRaises(ValueError):
out = model.fit(None, None, epochs=1,
steps_per_epoch=None,
validation_steps=2)
out = model.fit(None, None, epochs=1,
steps_per_epoch=2,
validation_steps=2)
# test evaluate
with self.assertRaises(ValueError):
out = model.evaluate(None, None, batch_size=10)
out = model.evaluate(None, None, steps=3)
# test predict
with self.assertRaises(ValueError):
out = model.predict(None, batch_size=10)
out = model.predict(None, steps=3)
self.assertEqual(out.shape, (10 * 3, 4))
# Test multi-output model with no external data at all.
self.evaluate(variables_lib.variables_initializer([input_v]))
a = keras.Input(tensor=input_v)
a_1 = keras.layers.Dense(4, name='dense_1')(a)
a_2 = keras.layers.Dropout(0.5, name='dropout')(a_1)
model = keras.models.Model(a, [a_1, a_2])
model.add_loss(keras.backend.mean(a_2))
model.compile(optimizer='rmsprop',
loss=None,
metrics=['mean_squared_error'])
# test train_on_batch
out = model.train_on_batch(None, None)
out = model.test_on_batch(None, None)
out = model.predict_on_batch(None)
# test fit
with self.assertRaises(ValueError):
out = model.fit(None, None, epochs=1, batch_size=10)
out = model.fit(None, None, epochs=1, steps_per_epoch=1)
# test evaluate
with self.assertRaises(ValueError):
out = model.evaluate(None, None, batch_size=10)
out = model.evaluate(None, None, steps=3)
# test predict
with self.assertRaises(ValueError):
out = model.predict(None, batch_size=10, verbose=1)
out = model.predict(None, steps=3)
self.assertEqual(len(out), 2)
self.assertEqual(out[0].shape, (10 * 3, 4))
self.assertEqual(out[1].shape, (10 * 3, 4))
def test_target_tensors(self):
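    # Targets are supplied at compile time via `target_tensors`, so the target
    # argument passed to train_on_batch is None.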
with self.cached_session():
# single-output, as list
model = keras.models.Sequential()
model.add(keras.layers.Dense(4, input_shape=(4,), name='dense'))
input_val = np.random.random((10, 4))
target_val = np.random.random((10, 4))
target = keras.backend.variable(target_val)
model.compile(optimizer='rmsprop', loss='mse', target_tensors=[target])
model.train_on_batch(input_val, None)
# single-output, as single tensor
model.compile(optimizer='rmsprop', loss='mse', target_tensors=target)
model.train_on_batch(input_val, None)
# single-output, as dict
model.compile(optimizer='rmsprop', loss='mse',
target_tensors={'dense': target})
model.train_on_batch(input_val, None)
# test invalid arguments
with self.assertRaises(TypeError):
model.compile(optimizer='rmsprop', loss='mse',
target_tensors=set())
with self.assertRaises(ValueError):
model.compile(optimizer='rmsprop', loss='mse',
target_tensors=[target, target])
with self.assertRaises(ValueError):
model.compile(optimizer='rmsprop', loss='mse',
target_tensors={'dense2': None})
with self.assertRaises(ValueError):
model.compile(optimizer='rmsprop', loss='mse',
target_tensors=[target])
model.train_on_batch(input_val, target_val)
# multi-output, as list
input_val = np.random.random((10, 4))
target_val_a = np.random.random((10, 4))
target_val_b = np.random.random((10, 4))
target_a = keras.backend.variable(target_val_a)
target_b = keras.backend.variable(target_val_b)
inputs = keras.layers.Input(shape=(4,))
output_a = keras.layers.Dense(4, name='dense_a')(inputs)
output_b = keras.layers.Dense(4, name='dense_b')(inputs)
model = keras.models.Model(inputs, [output_a, output_b])
model.compile(optimizer='rmsprop', loss='mse',
target_tensors=[target_a, target_b])
model.train_on_batch(input_val, None)
# multi-output, as dict
model.compile(optimizer='rmsprop', loss='mse',
target_tensors={'dense_a': target_a,
'dense_b': target_b})
model.train_on_batch(input_val, None)
# test with sample weights
model.compile(
optimizer='rmsprop',
loss='mse',
metrics=['mae', metrics_module.CategoricalAccuracy()],
target_tensors=[target_a, target_b])
model.train_on_batch(input_val, None,
sample_weight={'dense_a': np.random.random((10,))})
@tf_test_util.run_deprecated_v1
def test_model_custom_target_tensors(self):
with self.cached_session():
a = keras.Input(shape=(3,), name='input_a')
b = keras.Input(shape=(3,), name='input_b')
a_2 = keras.layers.Dense(4, name='dense_1')(a)
dp = keras.layers.Dropout(0.5, name='dropout')
b_2 = dp(b)
y = keras.backend.placeholder([10, 4], name='y')
y1 = keras.backend.placeholder([10, 3], name='y1')
y2 = keras.backend.placeholder([7, 5], name='y2')
model = keras.models.Model([a, b], [a_2, b_2])
optimizer = 'rmsprop'
loss = 'mse'
loss_weights = [1., 0.5]
# test list of target tensors
with self.assertRaises(ValueError):
model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
sample_weight_mode=None, target_tensors=[y, y1, y2])
model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
sample_weight_mode=None, target_tensors=[y, y1])
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_a_np = np.random.random((10, 4))
output_b_np = np.random.random((10, 3))
_ = model.train_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np], {
'dense_1': np.random.random((10,)),
'dropout': np.random.random((10,))
})
# test dictionary of target_tensors
with self.assertRaises(ValueError):
model.compile(optimizer, loss,
metrics=[],
loss_weights=loss_weights,
sample_weight_mode=None,
target_tensors={'does_not_exist': y2})
# test dictionary of target_tensors
model.compile(optimizer, loss,
metrics=[],
loss_weights=loss_weights,
sample_weight_mode=None,
target_tensors={'dense_1': y, 'dropout': y1})
_ = model.train_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np], {
'dense_1': np.random.random((10,)),
'dropout': np.random.random((10,))
})
# test with custom TF placeholder as target
pl_target_a = keras.backend.array_ops.placeholder('float32',
shape=(None, 4))
model.compile(optimizer='rmsprop', loss='mse',
target_tensors={'dense_1': pl_target_a})
model.train_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np])
class TestTrainingWithMetrics(keras_parameterized.TestCase):
"""Training tests related to metrics."""
@keras_parameterized.run_all_keras_modes
def test_metrics_names(self):
a = keras.layers.Input(shape=(3,), name='input_a')
b = keras.layers.Input(shape=(3,), name='input_b')
dense = keras.layers.Dense(4, name='dense')
c = dense(a)
d = dense(b)
e = keras.layers.Dropout(0.5, name='dropout')(c)
model = keras.models.Model([a, b], [d, e])
optimizer = RMSPropOptimizer(learning_rate=0.001)
metrics = ['mse', metrics_module.BinaryAccuracy()]
model.compile(optimizer, loss='mae', metrics=metrics,
run_eagerly=testing_utils.should_run_eagerly())
mse_metric = 'mse' if tf2.enabled() else 'mean_squared_error'
reference_metric_names = [
'loss', 'dense_loss', 'dropout_loss', 'dense_' + mse_metric,
'dense_binary_accuracy', 'dropout_' + mse_metric,
'dropout_binary_accuracy'
]
self.assertEqual(reference_metric_names, model.metrics_names)
# Verify that model metric names are not altered during training.
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_d_np = np.random.random((10, 4))
output_e_np = np.random.random((10, 4))
model.fit([input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=1,
batch_size=5)
self.assertEqual(reference_metric_names, model.metrics_names)
@keras_parameterized.run_all_keras_modes
def test_metric_state_reset_between_fit_and_evaluate(self):
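    # The metric's `count` should reflect only the samples seen by the most
    # recent fit or evaluate call, confirming its state is reset in between.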
model = keras.Sequential()
model.add(keras.layers.Dense(3, activation='relu', input_dim=4))
model.add(keras.layers.Dense(1, activation='sigmoid'))
acc_obj = metrics_module.BinaryAccuracy()
model.compile(
loss='mae',
metrics=[acc_obj],
optimizer=RMSPropOptimizer(learning_rate=0.001),
run_eagerly=testing_utils.should_run_eagerly())
x_train = np.random.random((100, 4))
y_train = np.random.random((100, 1))
model.fit(x_train, y_train, batch_size=5, epochs=2)
self.assertEqual(self.evaluate(acc_obj.count), 100)
x_test = np.random.random((10, 4))
y_test = np.random.random((10, 1))
model.evaluate(x_test, y_test, batch_size=5)
self.assertEqual(self.evaluate(acc_obj.count), 10)
@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])
@keras_parameterized.run_all_keras_modes
def test_metrics_valid_compile_input_formats(self):
inp_1 = keras.layers.Input(shape=(1,), name='input_1')
inp_2 = keras.layers.Input(shape=(1,), name='input_2')
x = keras.layers.Dense(3, kernel_initializer='ones', trainable=False)
out_1 = keras.layers.Dense(
1, kernel_initializer='ones', name='output_1', trainable=False)
out_2 = keras.layers.Dense(
1, kernel_initializer='ones', name='output_2', trainable=False)
branch_a = [inp_1, x, out_1]
branch_b = [inp_2, x, out_2]
model = testing_utils.get_multi_io_model(branch_a, branch_b)
# list of metrics.
model.compile(
optimizer='rmsprop',
loss='mse',
metrics=[keras.metrics.MeanSquaredError()],
weighted_metrics=[keras.metrics.MeanSquaredError()],
run_eagerly=testing_utils.should_run_eagerly())
# list of list of metrics.
model.compile(
optimizer='rmsprop',
loss='mse',
metrics=[
keras.metrics.MeanSquaredError(),
[keras.metrics.MeanSquaredError(),
keras.metrics.Accuracy()]
],
weighted_metrics=[
keras.metrics.MeanSquaredError(),
[keras.metrics.MeanSquaredError(),
keras.metrics.Accuracy()]
],
run_eagerly=testing_utils.should_run_eagerly())
# dict of metrics.
model.compile(
optimizer='rmsprop',
loss='mse',
metrics={
'output_1':
keras.metrics.MeanSquaredError(),
'output_2': [
keras.metrics.MeanSquaredError(),
keras.metrics.Accuracy()
],
},
weighted_metrics={
'output_1':
keras.metrics.MeanSquaredError(),
'output_2': [
keras.metrics.MeanSquaredError(),
keras.metrics.Accuracy()
],
},
run_eagerly=testing_utils.should_run_eagerly())
@keras_parameterized.run_all_keras_modes
def test_invalid_metrics(self):
num_classes = 5
input_dim = 5
model = testing_utils.get_small_sequential_mlp(
num_hidden=10, num_classes=num_classes, input_dim=input_dim)
with self.assertRaisesRegexp(
TypeError, 'Type of `metrics` argument not understood. '
'Expected a list or dictionary, found: '):
model.compile(
RMSPropOptimizer(learning_rate=0.001),
loss='categorical_crossentropy',
metrics=metrics_module.CategoricalAccuracy(),
run_eagerly=testing_utils.should_run_eagerly())
inp = keras.layers.Input(shape=(1,))
x = keras.layers.Dense(3, activation='relu')(inp)
out_1 = keras.layers.Dense(1, activation='sigmoid', name='output_1')(x)
out_2 = keras.layers.Dense(1, activation='sigmoid', name='output_2')(x)
model = keras.models.Model(inp, [out_1, out_2])
with self.assertRaisesRegex(
ValueError, 'When passing a list of lists as `metrics`, '
'it should have one entry per model output. '
'The model has 2 outputs, but you passed metrics='):
model.compile('rmsprop', loss='mse', metrics=[['mse']])
with self.assertRaisesRegex(
ValueError,
r'Unknown entries in metrics dictionary: \[\'output_3\'\]. Only '
r'expected following keys: \[\'output_1\', \'output_2\'\]'):
model.compile(
optimizer='rmsprop',
loss='mse',
metrics={
'output_1': 'mse',
'output_3': 'mse',
},
run_eagerly=testing_utils.should_run_eagerly())
with self.assertRaisesRegex(
ValueError,
r'Unknown entries in metrics dictionary: \[\'output_3\'\]. Only '
r'expected following keys: \[\'output_1\', \'output_2\'\]'):
model.compile(
optimizer='rmsprop',
loss='mse',
weighted_metrics={
'output_1': 'mse',
'output_3': 'mse',
},
run_eagerly=testing_utils.should_run_eagerly())
@keras_parameterized.run_all_keras_modes
def test_metrics_masking(self):
if testing_utils.should_run_eagerly():
self.skipTest('b/120495761')
with self.cached_session():
np.random.seed(1337)
model = keras.models.Sequential()
model.add(keras.layers.Masking(mask_value=0, input_shape=(2, 1)))
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(1, kernel_initializer='ones')))
model.compile(
RMSPropOptimizer(learning_rate=0.001),
loss='mse',
weighted_metrics=['accuracy'],
run_eagerly=testing_utils.should_run_eagerly())
# verify that masking is applied.
x = np.array([[[1], [1]], [[1], [1]], [[0], [0]]])
y = np.array([[[1], [1]], [[0], [1]], [[1], [1]]])
scores = model.train_on_batch(x, y)
self.assertArrayNear(scores, [0.25, 0.75], 0.1)
# verify that masking is combined with sample weights.
w = np.array([3, 2, 4])
scores = model.train_on_batch(x, y, sample_weight=w)
self.assertArrayNear(scores, [0.3328, 0.8], 0.001)
@keras_parameterized.run_all_keras_modes
def test_add_metric_with_tensor_on_model(self):
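    # With all-ones inputs, a ones-initialized Dense(1) and batch_size=5, the
    # per-batch reduce_sum(y) is 5, which is the expected value of metric_1.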
x = keras.layers.Input(shape=(1,))
y = keras.layers.Dense(1, kernel_initializer='ones')(x)
model = keras.models.Model(x, y)
model.add_metric(
math_ops.reduce_sum(y), name='metric_1', aggregation='mean')
if context.executing_eagerly():
# This is not a use case in v1 graph mode.
mean_result = metrics_module.Mean()(y)
with self.assertRaisesRegex(
ValueError, 'Expected a symbolic Tensor for the metric value'):
model.add_metric(mean_result, name='metric_2')
with self.assertRaisesRegex(
ValueError, 'Using the result of calling a `Metric` object '):
with keras.backend.get_graph().as_default():
model.add_metric(metrics_module.Mean(name='metric_2')(y))
model.compile(
'sgd', loss='mse', run_eagerly=testing_utils.should_run_eagerly())
inputs = np.ones(shape=(10, 1))
targets = np.ones(shape=(10, 1))
history = model.fit(
inputs,
targets,
epochs=2,
batch_size=5,
validation_data=(inputs, targets))
self.assertEqual(history.history['metric_1'][-1], 5)
self.assertEqual(history.history['val_metric_1'][-1], 5)
eval_results = model.evaluate(inputs, targets, batch_size=5)
self.assertEqual(eval_results[-1], 5)
model.predict(inputs, batch_size=5)
model.train_on_batch(inputs, targets)
model.test_on_batch(inputs, targets)
@keras_parameterized.run_all_keras_modes
def test_add_metric_in_model_call(self):
class TestModel(keras.Model):
def __init__(self):
super(TestModel, self).__init__(name='test_model')
self.dense1 = keras.layers.Dense(2, kernel_initializer='ones')
self.mean = metrics_module.Mean(name='metric_1')
def call(self, x):
self.add_metric(
math_ops.reduce_sum(x), name='metric_2', aggregation='mean')
# Provide same name as in the instance created in __init__
# for eager mode
self.add_metric(self.mean(x), name='metric_1')
return self.dense1(x)
model = TestModel()
model.compile(loss='mse', optimizer=RMSPropOptimizer(0.01),
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones(shape=(10, 1))
y = np.ones(shape=(10, 2))
history = model.fit(x, y, epochs=2, batch_size=5, validation_data=(x, y))
self.assertAlmostEqual(history.history['metric_1'][-1], 1, 0)
self.assertAlmostEqual(history.history['val_metric_1'][-1], 1, 0)
self.assertAlmostEqual(history.history['metric_2'][-1], 5, 0)
self.assertAlmostEqual(history.history['val_metric_2'][-1], 5, 0)
eval_results = model.evaluate(x, y, batch_size=5)
self.assertAlmostEqual(eval_results[1], 1, 0)
self.assertAlmostEqual(eval_results[2], 5, 0)
model.predict(x, batch_size=5)
model.train_on_batch(x, y)
model.test_on_batch(x, y)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_add_metric_in_layer_call(self):
class TestLayer(keras.layers.Layer):
def build(self, input_shape):
self.a = self.add_variable(
'a', (1, 1), initializer='ones', trainable=False)
self.built = True
def call(self, inputs):
self.add_metric(
math_ops.reduce_sum(inputs), name='metric_1', aggregation='mean')
return inputs + 1
layers = [
TestLayer(input_shape=(1,)),
keras.layers.Dense(2, kernel_initializer='ones')
]
model = testing_utils.get_model_from_layers(layers, input_shape=(1,))
model.compile(loss='mse', optimizer=RMSPropOptimizer(0.01),
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones(shape=(10, 1))
y = np.ones(shape=(10, 2))
history = model.fit(x, y, epochs=2, batch_size=5, validation_data=(x, y))
self.assertEqual(history.history['metric_1'][-1], 5)
self.assertAlmostEqual(history.history['val_metric_1'][-1], 5, 0)
@keras_parameterized.run_all_keras_modes
def test_model_metrics_list(self):
class LayerWithAddMetric(keras.layers.Layer):
def __init__(self):
super(LayerWithAddMetric, self).__init__()
self.dense = keras.layers.Dense(1, kernel_initializer='ones')
def __call__(self, inputs):
outputs = self.dense(inputs)
self.add_metric(
math_ops.reduce_sum(outputs), name='metric_1', aggregation='mean')
return outputs
class LayerWithNestedAddMetricLayer(keras.layers.Layer):
def __init__(self):
super(LayerWithNestedAddMetricLayer, self).__init__()
self.layer = LayerWithAddMetric()
def call(self, inputs):
outputs = self.layer(inputs)
self.add_metric(
math_ops.reduce_sum(outputs), name='metric_2', aggregation='mean')
return outputs
x = keras.layers.Input(shape=(1,))
y = LayerWithNestedAddMetricLayer()(x)
model = keras.models.Model(x, y)
model.add_metric(
math_ops.reduce_sum(y), name='metric_3', aggregation='mean')
if context.executing_eagerly():
# This is not a use case in v1 graph mode.
mean_result = metrics_module.Mean()(y)
with self.assertRaisesRegex(
ValueError, 'Expected a symbolic Tensor for the metric value'):
model.add_metric(mean_result, name='metric_4')
with self.assertRaisesRegex(
ValueError, 'Using the result of calling a `Metric` object '):
with keras.backend.get_graph().as_default():
model.add_metric(metrics_module.Mean(name='metric_4')(y))
model.compile(
'sgd',
loss='mse',
metrics=[metrics_module.Accuracy('metric_4')],
run_eagerly=testing_utils.should_run_eagerly())
# Verify that the metrics added using `compile` and `add_metric` API are
# included
self.assertEqual([m.name for m in model._compile_metrics], ['metric_4'])
self.assertEqual([m.name for m in model.metrics],
['metric_4', 'metric_2', 'metric_1', 'metric_3'])
@keras_parameterized.run_all_keras_modes
def test_model_metrics_list_in_call(self):
class TestModel(keras.Model):
def __init__(self):
super(TestModel, self).__init__(name='test_model')
self.dense1 = keras.layers.Dense(2, kernel_initializer='ones')
def call(self, x):
self.add_metric(
math_ops.reduce_sum(x), name='metric_1', aggregation='mean')
return self.dense1(x)
model = TestModel()
model.compile(
loss='mse',
optimizer=RMSPropOptimizer(0.01),
metrics=[metrics_module.Accuracy('acc')],
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones(shape=(10, 1))
y = np.ones(shape=(10, 2))
model.fit(x, y, epochs=2, batch_size=5, validation_data=(x, y))
self.assertEqual([m.name for m in model._compile_metrics], ['acc'])
self.assertEqual([m.name for m in model.metrics], ['acc', 'metric_1'])
@keras_parameterized.run_all_keras_modes
def test_multiple_add_metric_calls(self):
class TestModel(keras.Model):
def __init__(self):
super(TestModel, self).__init__(name='test_model')
self.dense1 = keras.layers.Dense(2, kernel_initializer='ones')
self.mean1 = metrics_module.Mean(name='metric_1')
self.mean2 = metrics_module.Mean(name='metric_2')
def call(self, x):
self.add_metric(self.mean2(x), name='metric_2')
self.add_metric(self.mean1(x), name='metric_1')
self.add_metric(
math_ops.reduce_sum(x), name='metric_3', aggregation='mean')
return self.dense1(x)
model = TestModel()
model.compile(loss='mse', optimizer=RMSPropOptimizer(0.01),
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones(shape=(10, 1))
y = np.ones(shape=(10, 2))
history = model.fit(x, y, epochs=2, batch_size=5, validation_data=(x, y))
self.assertAlmostEqual(history.history['metric_1'][-1], 1, 0)
self.assertAlmostEqual(history.history['metric_2'][-1], 1, 0)
self.assertAlmostEqual(history.history['metric_3'][-1], 5, 0)
eval_results = model.evaluate(x, y, batch_size=5)
self.assertArrayNear(eval_results[1:4], [1, 1, 5], 0.1)
model.predict(x, batch_size=5)
model.train_on_batch(x, y)
model.test_on_batch(x, y)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_invalid_metric_tensor(self):
class TestLayer(keras.layers.Layer):
def build(self, input_shape):
self.built = True
def call(self, inputs):
self.add_metric(math_ops.reduce_mean(inputs), name='metric_1')
return inputs + 1
layers = [TestLayer(input_shape=(1,))]
layers.append(keras.layers.Dense(2, kernel_initializer='ones'))
x = np.ones(shape=(10, 1))
y = np.ones(shape=(10, 2))
with self.assertRaisesRegexp(
ValueError,
'We do not support adding an aggregated metric result tensor that is '
'not the output of a `tf.keras.metrics.Metric` metric instance.'):
model = testing_utils.get_model_from_layers(layers, input_shape=(1,))
model.compile(
loss='mse',
optimizer=RMSPropOptimizer(0.01),
run_eagerly=testing_utils.should_run_eagerly())
model.fit(x, y, epochs=2, batch_size=5, validation_data=(x, y))
@keras_parameterized.run_all_keras_modes
def test_duplicate_metric_name_in_add_metric(self):
class TestModel(keras.Model):
def __init__(self):
super(TestModel, self).__init__(name='test_model')
self.dense1 = keras.layers.Dense(2, kernel_initializer='ones')
self.mean = metrics_module.Mean(name='metric_1')
self.mean2 = metrics_module.Mean(name='metric_1')
def call(self, x):
self.add_metric(self.mean(x), name='metric_1')
return self.dense1(x)
model = TestModel()
model.compile(loss='mse', optimizer=RMSPropOptimizer(0.01),
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones(shape=(10, 1))
y = np.ones(shape=(10, 2))
with self.assertRaisesRegexp(
ValueError,
'Please provide different names for the metrics you have added. '
'We found 2 metrics with the name: "metric_1"'):
model.fit(x, y, epochs=2, batch_size=5, validation_data=(x, y))
@keras_parameterized.run_all_keras_modes
def test_add_metric_without_name(self):
class TestModel(keras.Model):
def __init__(self):
super(TestModel, self).__init__(name='test_model')
self.dense1 = keras.layers.Dense(2, kernel_initializer='ones')
def call(self, x):
self.add_metric(math_ops.reduce_sum(x), aggregation='mean')
return self.dense1(x)
model = TestModel()
model.compile(loss='mse', optimizer=RMSPropOptimizer(0.01),
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones(shape=(10, 1))
y = np.ones(shape=(10, 2))
with self.assertRaisesRegex(ValueError,
'Please provide a name for your metric like'):
model.fit(x, y, epochs=2, batch_size=5, validation_data=(x, y))
@keras_parameterized.run_all_keras_modes
def test_add_metric_correctness(self):
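    # The same mean absolute error is tracked three ways: via a Metric object
    # inside a layer, via model.add_metric on a tensor, and via compile
    # metrics; all of them should match the loss trajectory.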
inputs = keras.Input(shape=(1,))
targets = keras.Input(shape=(1,))
class Bias(keras.layers.Layer):
def build(self, input_shape):
self.bias = self.add_variable('bias', (1,), initializer='zeros')
self.mae = metrics_module.MeanAbsoluteError(name='mae_1')
def call(self, inputs):
inputs, targets = inputs
outputs = inputs + self.bias
self.add_metric(self.mae(targets, outputs), name='mae_1')
return outputs
outputs = Bias()([inputs, targets])
model = keras.Model([inputs, targets], outputs)
model.add_metric(
metrics_module.mean_absolute_error(targets, outputs),
name='mae_2',
aggregation='mean')
model.compile(
loss='mae',
optimizer=keras.optimizer_v2.gradient_descent.SGD(0.1),
metrics=[metrics_module.MeanAbsoluteError(name='mae_3')],
run_eagerly=testing_utils.should_run_eagerly())
x = np.array([[0.], [1.], [2.]])
y = np.array([[0.5], [2.], [3.5]])
history = model.fit([x, y], y, batch_size=3, epochs=5)
expected_val = [1., 0.9, 0.8, 0.7, 0.6]
for key in ['loss', 'mae_1', 'mae_2', 'mae_3']:
self.assertAllClose(history.history[key], expected_val, 1e-3)
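# Helper layers for TestAutoUpdates below. Each one maintains a non-trainable
# `counter` variable that is incremented through a layer update (directly, via
# add_update, via a nested layer, or only while training).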
class BareUpdateLayer(keras.layers.Layer):
def build(self, input_shape):
self.counter = self.add_weight(
'counter',
dtype='int32',
shape=(),
initializer='zeros',
trainable=False)
def call(self, inputs):
state_ops.assign_add(self.counter, 1)
return math_ops.cast(self.counter, inputs.dtype) * inputs
class LambdaUpdateLayer(keras.layers.Layer):
def build(self, input_shape):
self.counter = self.add_weight(
'counter',
dtype='int32',
shape=(),
initializer='zeros',
trainable=False)
def call(self, inputs):
# Make sure update isn't run twice.
self.add_update(lambda: state_ops.assign_add(self.counter, 1))
return math_ops.cast(self.counter, inputs.dtype) * inputs
class NestedUpdateLayer(keras.layers.Layer):
def build(self, input_shape):
self.layer = BareUpdateLayer()
self.layer.build(input_shape)
@property
def counter(self):
return self.layer.counter
def call(self, inputs):
return self.layer(inputs)
class SubgraphUpdateLayer(keras.layers.Layer):
def build(self, input_shape):
self.counter = self.add_weight(
'counter',
dtype='int32',
shape=(),
initializer='zeros',
trainable=False)
def call(self, inputs, training=None):
if training is None:
training = keras.backend.learning_phase()
if training:
self.counter.assign(self.counter + 1)
return inputs
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TestAutoUpdates(keras_parameterized.TestCase):
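  """Tests that layer updates run automatically in fit and standalone calls."""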
@keras_parameterized.run_with_all_model_types
@parameterized.named_parameters(('bare_update', BareUpdateLayer()),
('lambda_update', LambdaUpdateLayer()),
('nested_update', NestedUpdateLayer()))
def test_updates_in_model(self, layer):
x, y = np.ones((10, 10)), np.ones((10, 1))
model = testing_utils.get_model_from_layers(
[layer, keras.layers.Dense(1)], input_shape=(10,))
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
model.fit(x, y, batch_size=2, epochs=1)
self.assertEqual(self.evaluate(layer.counter), 5)
@keras_parameterized.run_with_all_model_types
def test_lambda_updates_trainable_false(self):
x, y = np.ones((10, 10)), np.ones((10, 1))
layer = LambdaUpdateLayer()
model = testing_utils.get_model_from_layers(
[layer, keras.layers.Dense(1)], input_shape=(10,))
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
model.fit(x, y, batch_size=2, epochs=1)
self.assertEqual(self.evaluate(layer.counter), 5)
layer.trainable = False
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
model.fit(x, y, batch_size=2, epochs=1)
self.assertEqual(self.evaluate(layer.counter), 5)
@keras_parameterized.run_with_all_model_types
def test_subgraph_updates_in_model(self):
layer = SubgraphUpdateLayer()
x, y = np.ones((10, 10)), np.ones((10, 1))
model = testing_utils.get_model_from_layers(
[layer, keras.layers.Dense(1)], input_shape=(10,))
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
model.fit(x, y, batch_size=2, epochs=1)
self.assertEqual(self.evaluate(layer.counter), 5)
@parameterized.named_parameters(('bare_update', BareUpdateLayer()),
('lambda_update', LambdaUpdateLayer()),
('nested_update', NestedUpdateLayer()))
def test_updates_standalone_layer(self, layer):
y = layer(np.ones((10, 10)))
self.evaluate(layer.counter.initializer)
self.evaluate(y)
self.assertEqual(self.evaluate(layer.counter), 1)
def test_trainable_false_standalone_layer(self):
layer = LambdaUpdateLayer()
y = layer(np.ones((10, 10)))
self.evaluate(layer.counter.initializer)
self.evaluate(y)
self.assertEqual(self.evaluate(layer.counter), 1)
layer.trainable = False
y = layer(np.ones((10, 10)))
self.evaluate(y)
self.assertEqual(self.evaluate(layer.counter), 1)
@keras_parameterized.run_with_all_model_types
def test_batchnorm_trainable_false(self):
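    # With trainable=False, BatchNormalization must not update its moving
    # statistics during fit.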
bn = keras.layers.BatchNormalization()
model = testing_utils.get_model_from_layers([bn, keras.layers.Dense(1)],
input_shape=(10,))
bn.trainable = False
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
x, y = np.ones((10, 10)), np.ones((10, 1))
model.fit(x, y, batch_size=2, epochs=1)
self.assertAllEqual(self.evaluate(bn.moving_mean), np.zeros((10,)))
self.assertAllEqual(self.evaluate(bn.moving_variance), np.ones((10,)))
if __name__ == '__main__':
test.main()