repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
waseem18/oh-mainline | refs/heads/master | vendor/packages/twisted/twisted/internet/iocpreactor/__init__.py | 84 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
I/O Completion Ports reactor
"""
from twisted.internet.iocpreactor.reactor import install
__all__ = ['install']
|
agrista/odoo-saas | refs/heads/master | addons/website_crm_partner_assign/__init__.py | 3964 | import controllers
import models
|
giorda-a/Domoleaf | refs/heads/master | domomaster/etc/domoleaf/cron/CronLogsCalc.py | 2 | #!/usr/bin/python3
import sys
sys.path.append('/usr/lib/domoleaf')
import socket
import json
from DaemonConfigParser import *;
if __name__ == "__main__":
    # Cron entry point: ask the local DomoLeaf master daemon to recompute
    # its log statistics by sending a single "calc_logs" command packet.
    try:
        print("hey cron")
        parser = DaemonConfigParser('/etc/domoleaf/master.conf')
        ip = '127.0.0.1'
        # Command port published by the master daemon in its own config file.
        port = parser.getValueFromSection('listen', 'port_cmd')
        obj = {
            "packet_type": "calc_logs",
            "data": []
        }
        # The context manager guarantees the socket is closed even if send()
        # raises (the original leaked the descriptor on error).
        with socket.create_connection((ip, port)) as s:
            s.send(json.dumps(obj).encode())
    except Exception as e:
        # Best-effort cron job: report the failure on stdout and exit cleanly.
        print(str(e))
|
yanchen036/tensorflow | refs/heads/master | tensorflow/examples/adding_an_op/cuda_op.py | 192 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Cuda op Python library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import tensorflow as tf
# Load the custom CUDA kernel only when this TensorFlow build was compiled
# with CUDA support; on CPU-only builds `add_one` is simply not defined.
if tf.test.is_built_with_cuda():
    # The compiled shared object ships alongside this file as package data.
    _cuda_op_module = tf.load_op_library(os.path.join(
        tf.resource_loader.get_data_files_path(), 'cuda_op_kernel.so'))
    # Re-export the op at module level for convenient importing.
    add_one = _cuda_op_module.add_one
|
django-notifications/django-notifications | refs/heads/master | notifications/tests/tests.py | 1 | '''
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
'''
# -*- coding: utf-8 -*-
# pylint: disable=too-many-lines,missing-docstring
import json
import pytz
from django.conf import settings
from django.contrib.auth.models import Group, User
from django.core.exceptions import ImproperlyConfigured
from django.db import connection
from django.template import Context, Template
from django.test import RequestFactory, TestCase
from django.test.utils import CaptureQueriesContext
from django.utils import timezone
from django.utils.timezone import localtime, utc
from notifications.base.models import notify_handler
from notifications.signals import notify
from notifications.utils import id2slug
from swapper import load_model
Notification = load_model('notifications', 'Notification')
try:
# Django >= 1.7
from django.test import override_settings # noqa
except ImportError:
# Django <= 1.6
from django.test.utils import override_settings # noqa
try:
# Django >= 1.7
from django.urls import reverse
except ImportError:
# Django <= 1.6
from django.core.urlresolvers import reverse # pylint: disable=no-name-in-module,import-error
class NotificationTest(TestCase):
    ''' Django notifications automated tests '''

    @override_settings(USE_TZ=True)
    @override_settings(TIME_ZONE='Asia/Shanghai')
    def test_use_timezone(self):
        # With USE_TZ on, the stored timestamp is timezone-aware; a freshly
        # sent notification should be dated within a minute of "now" even
        # after converting into the configured (non-UTC) timezone.
        from_user = User.objects.create(username="from", password="pwd", email="example@example.com")
        to_user = User.objects.create(username="to", password="pwd", email="example@example.com")
        notify.send(from_user, recipient=to_user, verb='commented', action_object=from_user)
        notification = Notification.objects.get(recipient=to_user)
        delta = (
            timezone.now().replace(tzinfo=utc) - localtime(notification.timestamp, pytz.timezone(settings.TIME_ZONE))
        )
        self.assertTrue(delta.seconds < 60)
        # The delta between the two events will still be less than a second despite the different timezones
        # The call to now and the immediate call afterwards will be within a short period of time, not 8 hours as the
        # test above was originally.

    @override_settings(USE_TZ=False)
    @override_settings(TIME_ZONE='Asia/Shanghai')
    def test_disable_timezone(self):
        # With USE_TZ off, both timestamps are naive, so they subtract directly.
        from_user = User.objects.create(username="from2", password="pwd", email="example@example.com")
        to_user = User.objects.create(username="to2", password="pwd", email="example@example.com")
        notify.send(from_user, recipient=to_user, verb='commented', action_object=from_user)
        notification = Notification.objects.get(recipient=to_user)
        delta = timezone.now() - notification.timestamp
        self.assertTrue(delta.seconds < 60)
class NotificationManagersTest(TestCase):
    ''' Django notifications Manager automated tests.

    setUp() sends ``message_count`` direct notifications plus one batch to a
    group and one to a user queryset; ``self.message_count`` is updated to
    the grand total so the manager counts can be asserted against it.
    '''

    def setUp(self):
        self.message_count = 10
        self.other_user = User.objects.create(username="other1", password="pwd", email="example@example.com")
        self.from_user = User.objects.create(username="from2", password="pwd", email="example@example.com")
        self.to_user = User.objects.create(username="to2", password="pwd", email="example@example.com")
        self.to_group = Group.objects.create(name="to2_g")
        self.to_user_list = User.objects.all()
        self.to_group.user_set.add(self.to_user)
        self.to_group.user_set.add(self.other_user)
        for _ in range(self.message_count):
            notify.send(self.from_user, recipient=self.to_user, verb='commented', action_object=self.from_user)
        # Send notification to group (one notification per group member).
        notify.send(self.from_user, recipient=self.to_group, verb='commented', action_object=self.from_user)
        self.message_count += self.to_group.user_set.count()
        # Send notification to user list (one notification per user).
        notify.send(self.from_user, recipient=self.to_user_list, verb='commented', action_object=self.from_user)
        self.message_count += len(self.to_user_list)

    def test_notify_send_return_val(self):
        # notify.send returns (receiver, response) pairs; the notifications
        # handler must have produced exactly one Notification instance.
        results = notify.send(self.from_user, recipient=self.to_user, verb='commented', action_object=self.from_user)
        for result in results:
            if result[0] is notify_handler:
                self.assertEqual(len(result[1]), 1)
                # only check types for now
                self.assertEqual(type(result[1][0]), Notification)

    def test_notify_send_return_val_group(self):  # pylint: disable=invalid-name
        # Sending to a group fans out to one Notification per group member.
        results = notify.send(self.from_user, recipient=self.to_group, verb='commented', action_object=self.from_user)
        for result in results:
            if result[0] is notify_handler:
                self.assertEqual(len(result[1]), self.to_group.user_set.count())
                for notification in result[1]:
                    # only check types for now
                    self.assertEqual(type(notification), Notification)

    def test_unread_manager(self):
        self.assertEqual(Notification.objects.unread().count(), self.message_count)
        notification = Notification.objects.filter(recipient=self.to_user).first()
        notification.mark_as_read()
        self.assertEqual(Notification.objects.unread().count(), self.message_count-1)
        for notification in Notification.objects.unread():
            self.assertTrue(notification.unread)

    def test_read_manager(self):
        self.assertEqual(Notification.objects.unread().count(), self.message_count)
        notification = Notification.objects.filter(recipient=self.to_user).first()
        notification.mark_as_read()
        self.assertEqual(Notification.objects.read().count(), 1)
        for notification in Notification.objects.read():
            self.assertFalse(notification.unread)

    def test_mark_all_as_read_manager(self):
        self.assertEqual(Notification.objects.unread().count(), self.message_count)
        Notification.objects.filter(recipient=self.to_user).mark_all_as_read()
        self.assertEqual(self.to_user.notifications.unread().count(), 0)

    @override_settings(DJANGO_NOTIFICATIONS_CONFIG={
        'SOFT_DELETE': True
    })  # pylint: disable=invalid-name
    def test_mark_all_as_read_manager_with_soft_delete(self):
        # even soft-deleted notifications should be marked as read
        # refer: https://github.com/django-notifications/django-notifications/issues/126
        to_delete = Notification.objects.filter(recipient=self.to_user).order_by('id')[0]
        to_delete.deleted = True
        to_delete.save()
        self.assertTrue(Notification.objects.filter(recipient=self.to_user).order_by('id')[0].unread)
        Notification.objects.filter(recipient=self.to_user).mark_all_as_read()
        self.assertFalse(Notification.objects.filter(recipient=self.to_user).order_by('id')[0].unread)

    def test_mark_all_as_unread_manager(self):
        self.assertEqual(Notification.objects.unread().count(), self.message_count)
        Notification.objects.filter(recipient=self.to_user).mark_all_as_read()
        self.assertEqual(self.to_user.notifications.unread().count(), 0)
        Notification.objects.filter(recipient=self.to_user).mark_all_as_unread()
        self.assertEqual(Notification.objects.unread().count(), self.message_count)

    def test_mark_all_deleted_manager_without_soft_delete(self):  # pylint: disable=invalid-name
        # Without SOFT_DELETE configured, every soft-delete manager method
        # must raise ImproperlyConfigured.  (The original test asserted
        # `active` twice and never exercised `deleted` -- fixed here.)
        self.assertRaises(ImproperlyConfigured, Notification.objects.active)
        self.assertRaises(ImproperlyConfigured, Notification.objects.deleted)
        self.assertRaises(ImproperlyConfigured, Notification.objects.mark_all_as_deleted)
        self.assertRaises(ImproperlyConfigured, Notification.objects.mark_all_as_active)

    @override_settings(DJANGO_NOTIFICATIONS_CONFIG={
        'SOFT_DELETE': True
    })
    def test_mark_all_deleted_manager(self):
        # Soft-deleting everything removes items from read/unread/active;
        # restoring them brings the previous counts back.
        notification = Notification.objects.filter(recipient=self.to_user).first()
        notification.mark_as_read()
        self.assertEqual(Notification.objects.read().count(), 1)
        self.assertEqual(Notification.objects.unread().count(), self.message_count-1)
        self.assertEqual(Notification.objects.active().count(), self.message_count)
        self.assertEqual(Notification.objects.deleted().count(), 0)
        Notification.objects.mark_all_as_deleted()
        self.assertEqual(Notification.objects.read().count(), 0)
        self.assertEqual(Notification.objects.unread().count(), 0)
        self.assertEqual(Notification.objects.active().count(), 0)
        self.assertEqual(Notification.objects.deleted().count(), self.message_count)
        Notification.objects.mark_all_as_active()
        self.assertEqual(Notification.objects.read().count(), 1)
        self.assertEqual(Notification.objects.unread().count(), self.message_count-1)
        self.assertEqual(Notification.objects.active().count(), self.message_count)
        self.assertEqual(Notification.objects.deleted().count(), 0)
class NotificationTestPages(TestCase):
    ''' Django notifications automated page tests '''

    def setUp(self):
        # A staff recipient with `message_count` unread notifications.
        self.message_count = 10
        self.from_user = User.objects.create_user(username="from", password="pwd", email="example@example.com")
        self.to_user = User.objects.create_user(username="to", password="pwd", email="example@example.com")
        self.to_user.is_staff = True
        self.to_user.save()
        for _ in range(self.message_count):
            notify.send(self.from_user, recipient=self.to_user, verb='commented', action_object=self.from_user)

    def logout(self):
        self.client.post(reverse('admin:logout')+'?next=/', {})

    def login(self, username, password):
        # Log out first so each test starts from a clean session.
        self.logout()
        response = self.client.post(reverse('login'), {'username': username, 'password': password})
        self.assertEqual(response.status_code, 302)
        return response

    def test_all_messages_page(self):
        self.login('to', 'pwd')
        response = self.client.get(reverse('notifications:all'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.context['notifications']), len(self.to_user.notifications.all()))

    def test_unread_messages_pages(self):
        self.login('to', 'pwd')
        response = self.client.get(reverse('notifications:unread'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.context['notifications']), len(self.to_user.notifications.unread()))
        self.assertEqual(len(response.context['notifications']), self.message_count)
        # Mark roughly a third of the notifications as read via the view...
        for index, notification in enumerate(self.to_user.notifications.all()):
            if index % 3 == 0:
                response = self.client.get(reverse('notifications:mark_as_read', args=[id2slug(notification.id)]))
                self.assertEqual(response.status_code, 302)
        # ...and check the unread listing shrank accordingly.
        response = self.client.get(reverse('notifications:unread'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.context['notifications']), len(self.to_user.notifications.unread()))
        self.assertTrue(len(response.context['notifications']) < self.message_count)
        # mark_all_as_read empties the unread listing entirely.
        response = self.client.get(reverse('notifications:mark_all_as_read'))
        self.assertRedirects(response, reverse('notifications:unread'))
        response = self.client.get(reverse('notifications:unread'))
        self.assertEqual(len(response.context['notifications']), len(self.to_user.notifications.unread()))
        self.assertEqual(len(response.context['notifications']), 0)

    def test_next_pages(self):
        # The views must honour a ?next= redirect target, query string included.
        self.login('to', 'pwd')
        query_parameters = '?var1=hello&var2=world'
        response = self.client.get(reverse('notifications:mark_all_as_read'), data={
            "next": reverse('notifications:unread') + query_parameters,
        })
        self.assertRedirects(response, reverse('notifications:unread') + query_parameters)
        slug = id2slug(self.to_user.notifications.first().id)
        response = self.client.get(reverse('notifications:mark_as_read', args=[slug]), data={
            "next": reverse('notifications:unread') + query_parameters,
        })
        self.assertRedirects(response, reverse('notifications:unread') + query_parameters)
        slug = id2slug(self.to_user.notifications.first().id)
        response = self.client.get(reverse('notifications:mark_as_unread', args=[slug]), {
            "next": reverse('notifications:unread') + query_parameters,
        })
        self.assertRedirects(response, reverse('notifications:unread') + query_parameters)

    def test_delete_messages_pages(self):
        # Hard delete (default config): the notification disappears everywhere.
        self.login('to', 'pwd')
        slug = id2slug(self.to_user.notifications.first().id)
        response = self.client.get(reverse('notifications:delete', args=[slug]))
        self.assertRedirects(response, reverse('notifications:all'))
        response = self.client.get(reverse('notifications:all'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.context['notifications']), len(self.to_user.notifications.all()))
        self.assertEqual(len(response.context['notifications']), self.message_count-1)
        response = self.client.get(reverse('notifications:unread'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.context['notifications']), len(self.to_user.notifications.unread()))
        self.assertEqual(len(response.context['notifications']), self.message_count-1)

    @override_settings(DJANGO_NOTIFICATIONS_CONFIG={
        'SOFT_DELETE': True
    })  # pylint: disable=invalid-name
    def test_soft_delete_messages_manager(self):
        # Soft delete: the row survives but is excluded from active listings.
        self.login('to', 'pwd')
        slug = id2slug(self.to_user.notifications.first().id)
        response = self.client.get(reverse('notifications:delete', args=[slug]))
        self.assertRedirects(response, reverse('notifications:all'))
        response = self.client.get(reverse('notifications:all'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.context['notifications']), len(self.to_user.notifications.active()))
        self.assertEqual(len(response.context['notifications']), self.message_count-1)
        response = self.client.get(reverse('notifications:unread'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.context['notifications']), len(self.to_user.notifications.unread()))
        self.assertEqual(len(response.context['notifications']), self.message_count-1)

    def test_unread_count_api(self):
        # JSON endpoint: unread count tracks mark_all_as_read and new sends.
        self.login('to', 'pwd')
        response = self.client.get(reverse('notifications:live_unread_notification_count'))
        data = json.loads(response.content.decode('utf-8'))
        self.assertEqual(list(data.keys()), ['unread_count'])
        self.assertEqual(data['unread_count'], self.message_count)
        Notification.objects.filter(recipient=self.to_user).mark_all_as_read()
        response = self.client.get(reverse('notifications:live_unread_notification_count'))
        data = json.loads(response.content.decode('utf-8'))
        self.assertEqual(list(data.keys()), ['unread_count'])
        self.assertEqual(data['unread_count'], 0)
        notify.send(self.from_user, recipient=self.to_user, verb='commented', action_object=self.from_user)
        response = self.client.get(reverse('notifications:live_unread_notification_count'))
        data = json.loads(response.content.decode('utf-8'))
        self.assertEqual(list(data.keys()), ['unread_count'])
        self.assertEqual(data['unread_count'], 1)

    def test_all_count_api(self):
        # JSON endpoint: total count is unaffected by read state, grows on send.
        self.login('to', 'pwd')
        response = self.client.get(reverse('notifications:live_all_notification_count'))
        data = json.loads(response.content.decode('utf-8'))
        self.assertEqual(list(data.keys()), ['all_count'])
        self.assertEqual(data['all_count'], self.message_count)
        Notification.objects.filter(recipient=self.to_user).mark_all_as_read()
        response = self.client.get(reverse('notifications:live_all_notification_count'))
        data = json.loads(response.content.decode('utf-8'))
        self.assertEqual(list(data.keys()), ['all_count'])
        self.assertEqual(data['all_count'], self.message_count)
        notify.send(self.from_user, recipient=self.to_user, verb='commented', action_object=self.from_user)
        response = self.client.get(reverse('notifications:live_all_notification_count'))
        data = json.loads(response.content.decode('utf-8'))
        self.assertEqual(list(data.keys()), ['all_count'])
        self.assertEqual(data['all_count'], self.message_count + 1)

    def test_unread_list_api(self):
        self.login('to', 'pwd')
        response = self.client.get(reverse('notifications:live_unread_notification_list'))
        data = json.loads(response.content.decode('utf-8'))
        self.assertEqual(sorted(list(data.keys())), ['unread_count', 'unread_list'])
        self.assertEqual(data['unread_count'], self.message_count)
        self.assertEqual(len(data['unread_list']), self.message_count)
        # 'max' limits the returned list but not the reported count.
        response = self.client.get(reverse('notifications:live_unread_notification_list'), data={"max": 5})
        data = json.loads(response.content.decode('utf-8'))
        self.assertEqual(sorted(list(data.keys())), ['unread_count', 'unread_list'])
        self.assertEqual(data['unread_count'], self.message_count)
        self.assertEqual(len(data['unread_list']), 5)
        # Test with a bad 'max' value: it should be ignored, not error out.
        response = self.client.get(reverse('notifications:live_unread_notification_list'), data={
            "max": "this_is_wrong",
        })
        data = json.loads(response.content.decode('utf-8'))
        self.assertEqual(sorted(list(data.keys())), ['unread_count', 'unread_list'])
        self.assertEqual(data['unread_count'], self.message_count)
        self.assertEqual(len(data['unread_list']), self.message_count)
        Notification.objects.filter(recipient=self.to_user).mark_all_as_read()
        response = self.client.get(reverse('notifications:live_unread_notification_list'))
        data = json.loads(response.content.decode('utf-8'))
        self.assertEqual(sorted(list(data.keys())), ['unread_count', 'unread_list'])
        self.assertEqual(data['unread_count'], 0)
        self.assertEqual(len(data['unread_list']), 0)
        notify.send(self.from_user, recipient=self.to_user, verb='commented', action_object=self.from_user)
        response = self.client.get(reverse('notifications:live_unread_notification_list'))
        data = json.loads(response.content.decode('utf-8'))
        self.assertEqual(sorted(list(data.keys())), ['unread_count', 'unread_list'])
        self.assertEqual(data['unread_count'], 1)
        self.assertEqual(len(data['unread_list']), 1)
        self.assertEqual(data['unread_list'][0]['verb'], 'commented')
        self.assertEqual(data['unread_list'][0]['slug'], id2slug(data['unread_list'][0]['id']))

    def test_all_list_api(self):
        self.login('to', 'pwd')
        response = self.client.get(reverse('notifications:live_all_notification_list'))
        data = json.loads(response.content.decode('utf-8'))
        self.assertEqual(sorted(list(data.keys())), ['all_count', 'all_list'])
        self.assertEqual(data['all_count'], self.message_count)
        self.assertEqual(len(data['all_list']), self.message_count)
        # 'max' limits the returned list but not the reported count.
        response = self.client.get(reverse('notifications:live_all_notification_list'), data={"max": 5})
        data = json.loads(response.content.decode('utf-8'))
        self.assertEqual(sorted(list(data.keys())), ['all_count', 'all_list'])
        self.assertEqual(data['all_count'], self.message_count)
        self.assertEqual(len(data['all_list']), 5)
        # Test with a bad 'max' value: it should be ignored, not error out.
        response = self.client.get(reverse('notifications:live_all_notification_list'), data={
            "max": "this_is_wrong",
        })
        data = json.loads(response.content.decode('utf-8'))
        self.assertEqual(sorted(list(data.keys())), ['all_count', 'all_list'])
        self.assertEqual(data['all_count'], self.message_count)
        self.assertEqual(len(data['all_list']), self.message_count)
        Notification.objects.filter(recipient=self.to_user).mark_all_as_read()
        response = self.client.get(reverse('notifications:live_all_notification_list'))
        data = json.loads(response.content.decode('utf-8'))
        self.assertEqual(sorted(list(data.keys())), ['all_count', 'all_list'])
        self.assertEqual(data['all_count'], self.message_count)
        self.assertEqual(len(data['all_list']), self.message_count)
        notify.send(self.from_user, recipient=self.to_user, verb='commented', action_object=self.from_user)
        response = self.client.get(reverse('notifications:live_all_notification_list'))
        data = json.loads(response.content.decode('utf-8'))
        self.assertEqual(sorted(list(data.keys())), ['all_count', 'all_list'])
        self.assertEqual(data['all_count'], self.message_count + 1)
        self.assertEqual(len(data['all_list']), self.message_count)
        self.assertEqual(data['all_list'][0]['verb'], 'commented')
        self.assertEqual(data['all_list'][0]['slug'], id2slug(data['all_list'][0]['id']))

    def test_unread_list_api_mark_as_read(self):  # pylint: disable=invalid-name
        # Each fetch with mark_as_read=1 consumes `max` unread notifications.
        self.login('to', 'pwd')
        num_requested = 3
        response = self.client.get(
            reverse('notifications:live_unread_notification_list'),
            data={"max": num_requested, "mark_as_read": 1}
        )
        data = json.loads(response.content.decode('utf-8'))
        self.assertEqual(data['unread_count'],
                         self.message_count - num_requested)
        self.assertEqual(len(data['unread_list']), num_requested)
        response = self.client.get(
            reverse('notifications:live_unread_notification_list'),
            data={"max": num_requested, "mark_as_read": 1}
        )
        data = json.loads(response.content.decode('utf-8'))
        self.assertEqual(data['unread_count'],
                         self.message_count - 2*num_requested)
        self.assertEqual(len(data['unread_list']), num_requested)

    def test_live_update_tags(self):
        # Smoke test: the template tags render without raising.
        from django.shortcuts import render
        self.login('to', 'pwd')
        factory = RequestFactory()
        request = factory.get('/notification/live_updater')
        request.user = self.to_user
        render(request, 'notifications/test_tags.html', {'request': request})
        # TODO: Add more tests to check what is being output.

    def test_anon_user_gets_nothing(self):
        # Anonymous requests get empty results rather than an error.
        response = self.client.post(reverse('notifications:live_unread_notification_count'))
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.content.decode('utf-8'))
        self.assertEqual(data['unread_count'], 0)
        response = self.client.post(reverse('notifications:live_unread_notification_list'))
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.content.decode('utf-8'))
        self.assertEqual(data['unread_count'], 0)
        self.assertEqual(data['unread_list'], [])
class NotificationTestExtraData(TestCase):
    ''' Django notifications automated extra data tests '''

    def setUp(self):
        # One notification carrying arbitrary extra keyword data (url,
        # other_content), which the API must round-trip under 'data'.
        self.message_count = 1
        self.from_user = User.objects.create_user(username="from", password="pwd", email="example@example.com")
        self.to_user = User.objects.create_user(username="to", password="pwd", email="example@example.com")
        self.to_user.is_staff = True
        self.to_user.save()
        for _ in range(self.message_count):
            notify.send(
                self.from_user,
                recipient=self.to_user,
                verb='commented',
                action_object=self.from_user,
                url="/learn/ask-a-pro/q/test-question-9/299/",
                other_content="Hello my 'world'"
            )

    def logout(self):
        self.client.post(reverse('admin:logout')+'?next=/', {})

    def login(self, username, password):
        # Log out first so each test starts from a clean session.
        self.logout()
        response = self.client.post(reverse('login'), {'username': username, 'password': password})
        self.assertEqual(response.status_code, 302)
        return response

    def test_extra_data(self):
        self.login('to', 'pwd')
        response = self.client.post(reverse('notifications:live_unread_notification_list'))
        data = json.loads(response.content.decode('utf-8'))
        # The extra kwargs come back verbatim under the 'data' key.
        self.assertEqual(data['unread_list'][0]['data']['url'], "/learn/ask-a-pro/q/test-question-9/299/")
        self.assertEqual(data['unread_list'][0]['data']['other_content'], "Hello my 'world'")
class TagTest(TestCase):
    ''' Django notifications automated tags tests '''

    def setUp(self):
        self.message_count = 1
        self.from_user = User.objects.create_user(username="from", password="pwd", email="example@example.com")
        self.to_user = User.objects.create_user(username="to", password="pwd", email="example@example.com")
        self.to_user.is_staff = True
        self.to_user.save()
        for _ in range(self.message_count):
            notify.send(
                self.from_user,
                recipient=self.to_user,
                verb='commented',
                action_object=self.from_user,
                url="/learn/ask-a-pro/q/test-question-9/299/",
                other_content="Hello my 'world'"
            )

    def tag_test(self, template, context, output):
        # Render `template` (with the notifications tag library loaded)
        # against `context` and assert the exact rendered output.
        t = Template('{% load notifications_tags %}'+template)
        c = Context(context)
        self.assertEqual(t.render(c), output)

    def test_has_notification(self):
        template = "{{ user|has_notification }}"
        context = {"user": self.to_user}
        output = u"True"
        self.tag_test(template, context, output)
class AdminTest(TestCase):
    ''' Admin changelist for notifications renders within a bounded number
    of queries (guards against per-row N+1 lookups). '''
    app_name = "notifications"

    def setUp(self):
        self.message_count = 10
        self.from_user = User.objects.create_user(username="from", password="pwd", email="example@example.com")
        self.to_user = User.objects.create_user(username="to", password="pwd", email="example@example.com")
        self.to_user.is_staff = True
        self.to_user.is_superuser = True
        self.to_user.save()
        for _ in range(self.message_count):
            notify.send(
                self.from_user,
                recipient=self.to_user,
                verb='commented',
                action_object=self.from_user,
            )

    def test_list(self):
        self.client.login(username='to', password='pwd')
        # Capture the SQL issued while rendering the changelist; 6 is the
        # expected ceiling regardless of how many notifications exist.
        with CaptureQueriesContext(connection=connection) as context:
            response = self.client.get(reverse('admin:{0}_notification_changelist'.format(self.app_name)))
        self.assertLessEqual(len(context), 6)
        self.assertEqual(response.status_code, 200, response.content)
|
VestalNetSciHigh/store | refs/heads/master | code/calculate_stats.py | 1 | __author__ = 'VestalNetSciHigh'
""" Read in data (and array of distance metrics) and calculate various stats """
import time
# from numpy import loadtxt
import numpy
import settings
import datetime
start_time = time.time()
print "Started: " + datetime.datetime.fromtimestamp(start_time).strftime('%Y-%m-%d %H:%M:%S')
output_string = ""
# load output from parseCSV
distances = numpy.loadtxt(settings.OUTPUT+"\\real_distances.dat")
# sort all distances
ordered_distances = []
for i in xrange(distances.__len__()):
for id in distances[i]:
ordered_distances.append(id)
ordered_distances.sort()
output_string += "Total distances: " + str(ordered_distances.__len__()) + "\n"
# compute mean distance
mean_distance = sum(ordered_distances) / ordered_distances.__len__()
output_string += "Mean distance: " + str(mean_distance) + "\n"
settings.output(str(mean_distance), "mean_distance", ext=".dat", mode="write")
# compute root mean square
sum_squares = 0
for dist in ordered_distances:
sum_squares += dist * dist
root_mean_square = pow(sum_squares / float(ordered_distances.__len__()), 0.5)
output_string += "Root mean square: " + str(root_mean_square) + "\n"
# minimum distance (most dissimilar)
most_dissimilar = ordered_distances[0]
output_string += "Most dissimilar (min): " + str(most_dissimilar) + "\n"
settings.output(str(most_dissimilar), "most_dissimilar", ext=".dat", mode="write")
# compute median distance
median_distance = 0
index_float = ordered_distances.__len__() / 2.0
index_whole = ordered_distances.__len__() / 2 # intentional truncation
if index_float - index_whole < 0.001:
median_distance = (ordered_distances[index_whole] + ordered_distances[index_whole + 1]) / 2.0
else:
median_distance = ordered_distances[index_whole]
output_string += "Median distance: " + str(median_distance) + "\n"
# other stats
std = numpy.std(ordered_distances)
settings.output(str(std), "standard_deviation", ext=".dat", mode="write")
var = numpy.var(ordered_distances)
output_string += "Stardard Dev.: " + str(std)
output_string += "Variance: " + str(var)
total_seconds = time.time() - start_time
output_string += "Finished 'calculate_stats.py' in " + str(total_seconds) + " seconds!"
settings.output(output_string, "stats_real_valued", mode="write")
print "Finished 'calculate_stats.py' in " + str(total_seconds) + " seconds!" |
j0057/ansible-1 | refs/heads/fix-powershell-shebang-not-found | lib/ansible/runner/lookup_plugins/fileglob.py | 176 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import glob
from ansible import utils
class LookupModule(object):
    """Lookup plugin expanding shell-style glob patterns to existing files."""

    def __init__(self, basedir=None, **kwargs):
        # Directory against which relative glob patterns are resolved.
        self.basedir = basedir

    def run(self, terms, inject=None, **kwargs):
        """Return every regular file matching any of the glob terms."""
        terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
        matches = []
        for pattern in terms:
            resolved = utils.path_dwim(self.basedir, pattern)
            matches.extend(
                candidate
                for candidate in glob.glob(resolved)
                if os.path.isfile(candidate)
            )
        return matches
|
hmgaudecker/econ-project-templates | refs/heads/master | {{cookiecutter.project_slug}}/.mywaflib/waflib/extras/fluid.py | 55 | #!/usr/bin/python
# encoding: utf-8
# Grygoriy Fuchedzhy 2009
"""
Compile fluid files (fltk graphic library). Use the 'fluid' feature in conjunction with the 'cxx' feature.
"""
from waflib import Task
from waflib.TaskGen import extension
class fluid(Task.Task):
    """Waf task invoking the fltk ``fluid`` generator: .fl -> .cpp + header."""
    color = 'BLUE'
    # Declares that this task produces headers, so dependent compiles wait.
    ext_out = ['.h']
    run_str = '${FLUID} -c -o ${TGT[0].abspath()} -h ${TGT[1].abspath()} ${SRC}'
@extension('.fl')
def process_fluid(self, node):
    """Schedule fluid generation for a .fl source and compile the generated C++."""
    generated_cpp = node.change_ext('.cpp')
    generated_hpp = node.change_ext('.hpp')
    self.create_task('fluid', node, [generated_cpp, generated_hpp])
    # Only queue the generated .cpp for compilation when this task generator
    # actually builds C++.
    if 'cxx' in self.features:
        self.source.append(generated_cpp)
def configure(conf):
    """Find the ``fluid`` generator and the fltk build flags (uselib 'FLTK')."""
    conf.find_program('fluid', var='FLUID')
    # fltk-config reports compiler/linker flags; mandatory aborts configure on failure.
    conf.check_cfg(path='fltk-config', package='', args='--cxxflags --ldflags', uselib_store='FLTK', mandatory=True)
|
scaphilo/koalixcrm | refs/heads/master | projectsettings/__init__.py | 14224 | # -*- coding: utf-8 -*-
|
superbobry/climin | refs/heads/master | climin/mathadapt.py | 1 | """This module provides functionality which is usable for
coding towards gnumpy and numpy: the idea is to avoid if clauses
in the optimizer code.
"""
import numpy as np
try:
import gnumpy as gp
except ImportError:
pass
def sqrt(x):
    """Return the element-wise square root of `x`, preserving its shape."""
    return pow(x, 0.5)
def zero_like(x):
    """Return an array shaped like `x` containing only zeros."""
    return 0. * x
def ones_like(x):
    """Return an array shaped like `x` containing only ones."""
    return 1. + 0. * x
def clip(a, a_min, a_max):
    """Clip (limit) the values in an array.

    Given an interval, values outside it are pulled to its edges: entries
    below `a_min` become `a_min` and entries above `a_max` become `a_max`.
    """
    if isinstance(a, np.ndarray):
        return np.clip(a, a_min, a_max)
    # gnumpy path: emulate clip with boolean masks, since gnumpy offers none.
    over = (a > a_max)
    over_values = gp.ones(a.shape) * a_max
    under = (a < a_min)
    under_values = gp.ones(a.shape) * a_min
    return (a * (1 - over - under)
            + over_values * over
            + under_values * under)
def sign(x):
    """Returns an element-wise indication of the sign of a number."""
    # Dispatch on the concrete array type: numpy arrays take the numpy
    # path, anything else is assumed to be a gnumpy array.
    if isinstance(x, np.ndarray):
        return np.sign(x)
    return gp.sign(x)
def where(x, *args):
    """Delegate to gnumpy.where or numpy.where depending on the type of `x`."""
    if isinstance(x, np.ndarray):
        return np.where(x, *args)
    # Non-numpy input is assumed to be a gnumpy array.
    return gp.where(x, *args)
def random_like(x):
    """Return an array of the same shape as `x` filled with random numbers from
    the interval [0, 1)."""
    if isinstance(x, np.ndarray):
        return np.random.random(x.shape)
    # gnumpy draws uniforms via gp.rand.
    return gp.rand(x.shape)
def random_normal_like(x, loc, scale):
    """Return an array of the same shape as `x` filled with normally
    distributed random numbers with mean `loc` and standard deviation
    `scale`.

    (The previous docstring was copied from `random_like` and wrongly
    claimed uniform samples from [0, 1); the code has always drawn
    normals.)
    """
    if isinstance(x, np.ndarray):
        return np.random.normal(loc, scale, x.shape)
    # gp.randn yields standard normals; rescale and shift them.
    return gp.randn(*x.shape) * scale + loc
def assert_numpy(x):
    """Given a gnumpy or numpy array x, return a numpy array with the same
    contents (always a fresh copy, never a view of `x`)."""
    if isinstance(x, np.ndarray):
        return x.copy()
    # gnumpy arrays know how to convert themselves to numpy.
    return x.as_numpy_array()
def scalar(x):
    """Return `x` as a plain Python scalar.

    Accepts Python numbers directly and single-element arrays; raises
    ValueError if `x` holds more than one element.
    """
    # Accept plain Python numbers. The original code only special-cased
    # float, so an int fell through to the array path and crashed on
    # `x.size` with an AttributeError.
    if isinstance(x, (int, float)):
        return x
    if not x.size == 1:
        raise ValueError('size is %i instead of 1' % x.size)
    # Flatten to one dimension so indexing works for any array shape,
    # e.g. () or (1, 1).
    return x.reshape((1,))[0]
|
khaledhassan/python-xbee | refs/heads/master | examples/serial_example_series_1.py | 42 | #! /usr/bin/python
from xbee import XBee
import serial
"""
serial_example.py
By Paul Malmsten, 2010
Demonstrates reading the low-order address bits from an XBee Series 1
device over a serial port (USB) in API-mode.
"""
def main():
"""
Sends an API AT command to read the lower-order address bits from
an XBee Series 1 and looks for a response
"""
try:
# Open serial port
ser = serial.Serial('/dev/ttyUSB0', 9600)
# Create XBee Series 1 object
xbee = XBee(ser)
# Send AT packet
xbee.send('at', frame_id='A', command='DH')
# Wait for response
response = xbee.wait_read_frame()
print response
# Send AT packet
xbee.send('at', frame_id='B', command='DL')
# Wait for response
response = xbee.wait_read_frame()
print response
# Send AT packet
xbee.send('at', frame_id='C', command='MY')
# Wait for response
response = xbee.wait_read_frame()
print response
# Send AT packet
xbee.send('at', frame_id='D', command='CE')
# Wait for response
response = xbee.wait_read_frame()
print response
except KeyboardInterrupt:
pass
finally:
ser.close()
if __name__ == '__main__':
main()
|
ansible/ansible-modules-extras | refs/heads/devel | cloud/amazon/sqs_queue.py | 23 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Ansible module metadata: interface is declared stable and the module is
# maintained by the core committers.
ANSIBLE_METADATA = {'status': ['stableinterface'],
                    'supported_by': 'committer',
                    'version': '1.0'}
DOCUMENTATION = """
---
module: sqs_queue
short_description: Creates or deletes AWS SQS queues.
description:
- Create or delete AWS SQS queues.
- Update attributes on existing queues.
version_added: "2.0"
author:
- Alan Loi (@loia)
- Fernando Jose Pando (@nand0p)
- Nadir Lloret (@nadirollo)
requirements:
- "boto >= 2.33.0"
options:
state:
description:
- Create or delete the queue
required: false
choices: ['present', 'absent']
default: 'present'
name:
description:
- Name of the queue.
required: true
default_visibility_timeout:
description:
- The default visibility timeout in seconds.
required: false
default: null
message_retention_period:
description:
- The message retention period in seconds.
required: false
default: null
maximum_message_size:
description:
- The maximum message size in bytes.
required: false
default: null
delivery_delay:
description:
- The delivery delay in seconds.
required: false
default: null
receive_message_wait_time:
description:
- The receive message wait time in seconds.
required: false
default: null
policy:
description:
- The json dict policy to attach to queue
required: false
default: null
version_added: "2.1"
redrive_policy:
description:
- json dict with the redrive_policy (see example)
required: false
default: null
version_added: "2.2"
extends_documentation_fragment:
- aws
- ec2
"""
RETURN = '''
default_visibility_timeout:
description: The default visibility timeout in seconds.
returned: always
sample: 30
delivery_delay:
description: The delivery delay in seconds.
returned: always
sample: 0
maximum_message_size:
description: The maximum message size in bytes.
returned: always
sample: 262144
message_retention_period:
description: The message retention period in seconds.
returned: always
sample: 345600
name:
description: Name of the SQS Queue
returned: always
sample: "queuename-987d2de0"
queue_arn:
description: The queue's Amazon resource name (ARN).
returned: on successful creation or update of the queue
sample: 'arn:aws:sqs:us-east-1:199999999999:queuename-987d2de0'
receive_message_wait_time:
description: The receive message wait time in seconds.
returned: always
sample: 0
region:
description: Region that the queue was created within
returned: always
sample: 'us-east-1'
'''
EXAMPLES = '''
# Create SQS queue with redrive policy
- sqs_queue:
name: my-queue
region: ap-southeast-2
default_visibility_timeout: 120
message_retention_period: 86400
maximum_message_size: 1024
delivery_delay: 30
receive_message_wait_time: 20
policy: "{{ json_dict }}"
redrive_policy:
maxReceiveCount: 5
deadLetterTargetArn: arn:aws:sqs:eu-west-1:123456789012:my-dead-queue
# Delete SQS queue
- sqs_queue:
name: my-queue
region: ap-southeast-2
state: absent
'''
import json
import traceback
try:
import boto.sqs
from boto.exception import BotoServerError, NoAuthHandlerFound
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info
def create_or_update_sqs_queue(connection, module):
    """Ensure the named SQS queue exists with the requested attributes.

    Never returns normally: exits the Ansible run via module.exit_json
    (with a `changed` flag and the queue's effective attributes) or
    module.fail_json on a boto error.
    """
    queue_name = module.params.get('name')
    # Attributes the user may manage; a value of None means "not specified".
    queue_attributes = dict(
        default_visibility_timeout=module.params.get('default_visibility_timeout'),
        message_retention_period=module.params.get('message_retention_period'),
        maximum_message_size=module.params.get('maximum_message_size'),
        delivery_delay=module.params.get('delivery_delay'),
        receive_message_wait_time=module.params.get('receive_message_wait_time'),
        policy=module.params.get('policy'),
        redrive_policy=module.params.get('redrive_policy')
    )
    result = dict(
        region=module.params.get('region'),
        name=queue_name,
    )
    result.update(queue_attributes)
    try:
        queue = connection.get_queue(queue_name)
        if queue:
            # Update existing
            result['changed'] = update_sqs_queue(queue, check_mode=module.check_mode, **queue_attributes)
        else:
            # Create new
            if not module.check_mode:
                queue = connection.create_queue(queue_name)
                update_sqs_queue(queue, **queue_attributes)
            result['changed'] = True
        # In check mode a missing queue was never created, so there is no
        # live queue to read the effective attributes back from.
        if not module.check_mode:
            result['queue_arn'] = queue.get_attributes('QueueArn')['QueueArn']
            result['default_visibility_timeout'] = queue.get_attributes('VisibilityTimeout')['VisibilityTimeout']
            result['message_retention_period'] = queue.get_attributes('MessageRetentionPeriod')['MessageRetentionPeriod']
            result['maximum_message_size'] = queue.get_attributes('MaximumMessageSize')['MaximumMessageSize']
            result['delivery_delay'] = queue.get_attributes('DelaySeconds')['DelaySeconds']
            result['receive_message_wait_time'] = queue.get_attributes('ReceiveMessageWaitTimeSeconds')['ReceiveMessageWaitTimeSeconds']
    except BotoServerError:
        result['msg'] = 'Failed to create/update sqs queue due to error: ' + traceback.format_exc()
        module.fail_json(**result)
    else:
        module.exit_json(**result)
def update_sqs_queue(queue,
                     check_mode=False,
                     default_visibility_timeout=None,
                     message_retention_period=None,
                     maximum_message_size=None,
                     delivery_delay=None,
                     receive_message_wait_time=None,
                     policy=None,
                     redrive_policy=None):
    """Apply each provided attribute to `queue`; return True if any changed.

    Every attribute is attempted (no short-circuiting), matching SQS's
    one-call-per-attribute update model.
    """
    attribute_values = (
        ('VisibilityTimeout', default_visibility_timeout),
        ('MessageRetentionPeriod', message_retention_period),
        ('MaximumMessageSize', maximum_message_size),
        ('DelaySeconds', delivery_delay),
        ('ReceiveMessageWaitTimeSeconds', receive_message_wait_time),
        ('Policy', policy),
        ('RedrivePolicy', redrive_policy),
    )
    changed = False
    for attribute_name, attribute_value in attribute_values:
        if set_queue_attribute(queue, attribute_name, attribute_value,
                               check_mode=check_mode):
            changed = True
    return changed
def set_queue_attribute(queue, attribute, value, check_mode=False):
    """Set one SQS queue attribute if it differs from the current value.

    Returns True when a change was (or, in check mode, would be) made,
    False otherwise.
    """
    # None means "not specified by the user". The previous `if not value`
    # check also discarded legitimate zero values, so e.g.
    # `delivery_delay: 0` could never be applied.
    if value is None:
        return False
    try:
        existing_value = queue.get_attributes(attributes=attribute)[attribute]
    except Exception:
        # Attribute unreadable (e.g. never set on this queue) -- treat as
        # unset. (Narrowed from a bare `except:` that also hid
        # KeyboardInterrupt/SystemExit.)
        existing_value = ''
    # convert dict attributes to JSON strings (sort keys for comparing)
    if attribute in ['Policy', 'RedrivePolicy']:
        value = json.dumps(value, sort_keys=True)
        if existing_value:
            existing_value = json.dumps(json.loads(existing_value), sort_keys=True)
    # SQS returns attribute values as strings; compare in string space.
    if str(value) != existing_value:
        if not check_mode:
            queue.set_attribute(attribute, value)
        return True
    return False
def delete_sqs_queue(connection, module):
    """Delete the named SQS queue if it exists.

    Never returns normally: exits via module.exit_json (with `changed`
    reporting whether a queue was removed) or module.fail_json on a boto
    error.
    """
    name = module.params.get('name')
    result = {
        'region': module.params.get('region'),
        'name': name,
    }
    try:
        existing = connection.get_queue(name)
        if not existing:
            # Nothing to delete; idempotent no-op.
            result['changed'] = False
        else:
            if not module.check_mode:
                connection.delete_queue(existing)
            result['changed'] = True
    except BotoServerError:
        result['msg'] = 'Failed to delete sqs queue due to error: ' + traceback.format_exc()
        module.fail_json(**result)
    else:
        module.exit_json(**result)
def main():
    """Entry point: parse module arguments, connect to SQS and dispatch to
    the create/update or delete handler (which exit the process)."""
    # Start from the shared EC2 argument spec (region, credentials, ...)
    # and add the module-specific options.
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(default='present', choices=['present', 'absent']),
        name=dict(required=True, type='str'),
        default_visibility_timeout=dict(type='int'),
        message_retention_period=dict(type='int'),
        maximum_message_size=dict(type='int'),
        delivery_delay=dict(type='int'),
        receive_message_wait_time=dict(type='int'),
        policy=dict(type='dict', required=False),
        redrive_policy=dict(type='dict', required=False),
    ))
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True)
    # Hard requirement: this module drives SQS through boto.
    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')
    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg='region must be specified')
    try:
        connection = connect_to_aws(boto.sqs, region, **aws_connect_params)
    except (NoAuthHandlerFound, AnsibleAWSError) as e:
        module.fail_json(msg=str(e))
    # Both handlers terminate via module.exit_json/fail_json.
    state = module.params.get('state')
    if state == 'present':
        create_or_update_sqs_queue(connection, module)
    elif state == 'absent':
        delete_sqs_queue(connection, module)

if __name__ == '__main__':
    main()
|
RedHatInsights/insights-core | refs/heads/master | insights/combiners/tomcat_virtual_dir_context.py | 1 | """
TomcatVirtualDirContextCombined - check VirtualDirContext option used in Tomcat
===============================================================================
This combiner provides information about whether a VirtualDirContext is used in config files in
both default locations or location derived from running Tomcat process.
Examples::
>>> shared[TomcatVirtualDirContextCombined].data
{'/usr/share/tomcat/conf/server.xml':
[' <Resources className="org.apache.naming.resources.VirtualDirContext"'],
}
"""
from ..core.plugins import combiner
from ..parsers.tomcat_virtual_dir_context import TomcatVirtualDirContextFallback, \
TomcatVirtualDirContextTargeted
@combiner([TomcatVirtualDirContextFallback, TomcatVirtualDirContextTargeted])
class TomcatVirtualDirContextCombined(object):
    """
    Combiner for VirtualDirContext usage in Tomcat config files.

    Merges the findings of the fallback parser (default config locations)
    and the targeted parsers (locations derived from the running Tomcat
    process) into a single ``data`` dict mapping each config file path to
    a sorted, de-duplicated list of matching lines.
    """
    def __init__(self, tomcat_vdc_fallback, tomcat_vdc_targeted):
        self.data = {}
        # The fallback dependency yields a single parser (or None); the
        # targeted dependency yields a list of parsers (or None).
        parsers = []
        if tomcat_vdc_fallback:
            parsers.append(tomcat_vdc_fallback)
        if tomcat_vdc_targeted:
            parsers.extend(tomcat_vdc_targeted)
        for parser in parsers:
            for path in parser.data:
                self.data.setdefault(path, []).extend(parser.data[path])
        # Deduplicate and order the collected lines for each file.
        for path in self.data:
            self.data[path] = sorted(set(self.data[path]))
|
kidaa/encoded | refs/heads/master | src/encoded/upgrade/rnai.py | 1 | from contentbase import upgrade_step
from .shared import ENCODE2_AWARDS
@upgrade_step('rnai', '', '2')
def rnai_0_2(value, system):
    # http://redmine.encodedcc.org/issues/1295
    # http://redmine.encodedcc.org/issues/1307
    # Normalize legacy upper-case status values; CURRENT maps to
    # 'released' for ENCODE2-era awards and 'in progress' otherwise.
    status = value.get('status')
    if status == 'DELETED':
        value['status'] = 'deleted'
    elif status == 'CURRENT':
        if value['award'] in ENCODE2_AWARDS:
            value['status'] = 'released'
        else:
            value['status'] = 'in progress'
|
escherba/phraser | refs/heads/master | phraser/cc/preprocess/data_import/html_entity/generate_html_entity_parser_data.py | 2 | #!/usr/bin/python
# Generate tokenization-related normalization data as C++ source code.
#
# We have to do this custom because of specific requirements to match the Penn
# Treebank training data.
import codecs
import re
def each_line(file_name, encoding='utf-8'):
    """Yield the meaningful lines of a text file.

    For each line: strip everything after a '#' (comments), strip trailing
    whitespace, and skip lines that are empty or consist solely of a BOM
    character (U+FEFF).
    """
    with codecs.open(file_name, encoding=encoding) as handle:
        for raw in handle:
            comment_start = raw.find('#')
            if comment_start != -1:
                raw = raw[:comment_start]
            raw = raw.rstrip()
            if not raw:
                continue
            # A lone BOM survives decoding as a one-character line; drop it.
            if len(raw) == 1 and ord(raw[0]) == 65279:
                continue
            yield raw
# Matches a Unicode code point label such as 'U+0041' or 'U+1F600'.
U_CODE_POINT_RE = re.compile('^U\\+[0-9A-F]{4,5}$')

def parse_u_code_point(s):
    """Convert a 'U+XXXX'/'U+XXXXX' code point label to its integer value."""
    assert U_CODE_POINT_RE.match(s)
    # Drop the 'U+' prefix and interpret the rest as hexadecimal.
    return int(s[2:], 16)
# Matches a complete HTML named entity, e.g. '&amp;'.
HTML_ENTITY_RE = re.compile('^&[A-Za-z0-9]+;$')

def load_html_entities(f):
    """Parse an entity data file into {entity name -> Unicode code point}.

    Each meaningful line holds a 'U+XXXX' code point followed by one or
    more '&name;' entities that map to it. Duplicate names are rejected.
    """
    mapping = {}
    for line in each_line(f):
        fields = line.split()
        code_point = parse_u_code_point(fields[0])
        for entity in fields[1:]:
            assert HTML_ENTITY_RE.match(entity)
            # Strip the surrounding '&' and ';' to get the bare name.
            name = str(entity[1:-1])
            assert name not in mapping
            mapping[name] = code_point
    return mapping
# Template for the generated C++ header. The leading newline of the
# triple-quoted literal is dropped by the trailing [1:] slice.
H_SOURCE = """
#ifndef CC_PREPROCESS_HTML_ENTITY_PARSER_DATA_H_
#define CC_PREPROCESS_HTML_ENTITY_PARSER_DATA_H_
#include <string>
#include <unordered_map>
using std::string;
using std::unordered_map;
// Automatically generated file -- do not edit!
namespace html_entity_parser_data {
// HTML named entity -> Unicode code point.
extern unordered_map<string, uint32_t> HTML2UNICODE;
} // namespace html_entity_parser_data
#endif // CC_PREPROCESS_HTML_ENTITY_PARSER_DATA_H_
"""[1:]
# Template for the generated C++ source; %s receives the map initializer
# entries produced by generate_cc().
CC_SOURCE = """
#include "html_entity_parser_data.h"
// Automatically generated file -- do not edit!
namespace html_entity_parser_data {
unordered_map<string, uint32_t> HTML2UNICODE = {
%s
};
} // namespace html_entity_parser_data
"""[1:]
# Indentation used for each generated initializer entry.
INDENT = ' ' * 4
def quote(s):
    """Return `s` as a double-quoted C string literal.

    Backslashes and double quotes are escaped. The previous
    implementation used s.replace('"', '\"'), where '\"' is just '"' --
    a no-op that left quotes (and backslashes) unescaped in the emitted
    C source.
    """
    return '"%s"' % s.replace('\\', '\\\\').replace('"', '\\"')
def generate_cc(name2code):
    """Render the C++ source file body for the {entity name -> code point}
    mapping, with entries sorted by name for deterministic output."""
    entries = (
        '%s{%s, 0x%x},' % (INDENT, quote(name), name2code[name])
        for name in sorted(name2code)
    )
    return CC_SOURCE % '\n'.join(entries)
def main():
    """Read html_entities.txt and write the generated C++ header/source
    pair two directories up (the checked-in generated files)."""
    # The header is fully static; only the .cc embeds the parsed table.
    h = H_SOURCE
    name2code = load_html_entities('html_entities.txt')
    cc = generate_cc(name2code)
    open('../../html_entity_parser_data.h', 'wb').write(h)
    open('../../html_entity_parser_data.cc', 'wb').write(cc)

if __name__ == '__main__':
    main()
|
simbha/mAngE-Gin | refs/heads/master | lib/Django 1.7/django/contrib/gis/geos/geometry.py | 3 | """
This module contains the 'base' GEOSGeometry object -- all GEOS Geometries
inherit from this object.
"""
from __future__ import unicode_literals
# Python, ctypes and types dependencies.
from ctypes import addressof, byref, c_double
# super-class for mutable list behavior
from django.contrib.gis.geos.mutable_list import ListMixin
from django.contrib.gis.gdal.error import SRSException
# GEOS-related dependencies.
from django.contrib.gis.geos.base import GEOSBase, gdal
from django.contrib.gis.geos.coordseq import GEOSCoordSeq
from django.contrib.gis.geos.error import GEOSException, GEOSIndexError
from django.contrib.gis.geos.libgeos import GEOM_PTR
# All other functions in this module come from the ctypes
# prototypes module -- which handles all interaction with
# the underlying GEOS library.
from django.contrib.gis.geos import prototypes as capi
# These functions provide access to a thread-local instance
# of their corresponding GEOS I/O class.
from django.contrib.gis.geos.prototypes.io import wkt_r, wkt_w, wkb_r, wkb_w, ewkb_w
# For recognizing geometry input.
from django.contrib.gis.geometry.regex import hex_regex, wkt_regex, json_regex
from django.utils import six
from django.utils.encoding import force_bytes, force_text
class GEOSGeometry(GEOSBase, ListMixin):
    "A class that, generally, encapsulates a GEOS geometry."
    # Raise GEOSIndexError instead of plain IndexError
    # (see ticket #4740 and GEOSIndexError docstring)
    _IndexError = GEOSIndexError
    # ctypes pointer type checked by GEOSBase when assigning self.ptr.
    ptr_type = GEOM_PTR
    #### Python 'magic' routines ####
    def __init__(self, geo_input, srid=None):
        """
        The base constructor for GEOS geometry objects, and may take the
        following inputs:
         * strings:
            - WKT
            - HEXEWKB (a PostGIS-specific canonical form)
            - GeoJSON (requires GDAL)
         * buffer:
            - WKB
        The `srid` keyword is used to specify the Source Reference Identifier
        (SRID) number for this Geometry. If not set, the SRID will be None.
        """
        if isinstance(geo_input, bytes):
            geo_input = force_text(geo_input)
        if isinstance(geo_input, six.string_types):
            wkt_m = wkt_regex.match(geo_input)
            if wkt_m:
                # Handling WKT input.
                if wkt_m.group('srid'):
                    # EWKT carries its own SRID prefix, which overrides the
                    # `srid` argument.
                    srid = int(wkt_m.group('srid'))
                g = wkt_r().read(force_bytes(wkt_m.group('wkt')))
            elif hex_regex.match(geo_input):
                # Handling HEXEWKB input.
                g = wkb_r().read(force_bytes(geo_input))
            elif gdal.HAS_GDAL and json_regex.match(geo_input):
                # Handling GeoJSON input.
                g = wkb_r().read(gdal.OGRGeometry(geo_input).wkb)
            else:
                raise ValueError('String or unicode input unrecognized as WKT EWKT, and HEXEWKB.')
        elif isinstance(geo_input, GEOM_PTR):
            # When the input is a pointer to a geometry (GEOM_PTR).
            g = geo_input
        elif isinstance(geo_input, six.memoryview):
            # When the input is a buffer (WKB).
            g = wkb_r().read(geo_input)
        elif isinstance(geo_input, GEOSGeometry):
            # Copy-construct by cloning the underlying GEOS geometry.
            g = capi.geom_clone(geo_input.ptr)
        else:
            # Invalid geometry type.
            raise TypeError('Improper geometry input type: %s' % str(type(geo_input)))
        if g:
            # Setting the pointer object with a valid pointer.
            self.ptr = g
        else:
            raise GEOSException('Could not initialize GEOS Geometry with given input.')
        # Post-initialization setup.
        self._post_init(srid)
    def _post_init(self, srid):
        "Helper routine for performing post-initialization setup."
        # Setting the SRID, if given.
        if srid and isinstance(srid, int):
            self.srid = srid
        # Setting the class type (e.g., Point, Polygon, etc.)
        # by re-assigning __class__ based on the GEOS type id.
        self.__class__ = GEOS_CLASSES[self.geom_typeid]
        # Setting the coordinate sequence for the geometry (will be None on
        # geometries that do not have coordinate sequences)
        self._set_cs()
    def __del__(self):
        """
        Destroys this Geometry; in other words, frees the memory used by the
        GEOS C++ object.
        """
        # `capi` may already be torn down during interpreter shutdown;
        # guard both the pointer and the module reference.
        if self._ptr and capi:
            capi.destroy_geom(self._ptr)
    def __copy__(self):
        """
        Returns a clone because the copy of a GEOSGeometry may contain an
        invalid pointer location if the original is garbage collected.
        """
        return self.clone()
    def __deepcopy__(self, memodict):
        """
        The `deepcopy` routine is used by the `Node` class of django.utils.tree;
        thus, the protocol routine needs to be implemented to return correct
        copies (clones) of these GEOS objects, which use C pointers.
        """
        return self.clone()
    def __str__(self):
        "WKT is used for the string representation."
        return self.wkt
    def __repr__(self):
        "Short-hand representation because WKT may be very large."
        return '<%s object at %s>' % (self.geom_type, hex(addressof(self.ptr)))
    # Pickling support
    def __getstate__(self):
        # The pickled state is simply a tuple of the WKB (in string form)
        # and the SRID.
        return bytes(self.wkb), self.srid
    def __setstate__(self, state):
        # Instantiating from the tuple state that was pickled.
        wkb, srid = state
        ptr = wkb_r().read(six.memoryview(wkb))
        if not ptr:
            raise GEOSException('Invalid Geometry loaded from pickled state.')
        self.ptr = ptr
        self._post_init(srid)
    # Comparison operators
    def __eq__(self, other):
        """
        Equivalence testing, a Geometry may be compared with another Geometry
        or a WKT representation.
        """
        if isinstance(other, six.string_types):
            return self.wkt == other
        elif isinstance(other, GEOSGeometry):
            # NOTE: uses exact equality (equals_exact), not topological
            # equality (equals).
            return self.equals_exact(other)
        else:
            return False
    def __ne__(self, other):
        "The not equals operator."
        return not (self == other)
    ### Geometry set-like operations ###
    # Thanks to Sean Gillies for inspiration:
    # http://lists.gispython.org/pipermail/community/2007-July/001034.html
    # g = g1 | g2
    def __or__(self, other):
        "Returns the union of this Geometry and the other."
        return self.union(other)
    # g = g1 & g2
    def __and__(self, other):
        "Returns the intersection of this Geometry and the other."
        return self.intersection(other)
    # g = g1 - g2
    def __sub__(self, other):
        "Return the difference this Geometry and the other."
        return self.difference(other)
    # g = g1 ^ g2
    def __xor__(self, other):
        "Return the symmetric difference of this Geometry and the other."
        return self.sym_difference(other)
    #### Coordinate Sequence Routines ####
    @property
    def has_cs(self):
        "Returns True if this Geometry has a coordinate sequence, False if not."
        # Only these geometries are allowed to have coordinate sequences.
        if isinstance(self, (Point, LineString, LinearRing)):
            return True
        else:
            return False
    def _set_cs(self):
        "Sets the coordinate sequence for this Geometry."
        if self.has_cs:
            self._cs = GEOSCoordSeq(capi.get_cs(self.ptr), self.hasz)
        else:
            self._cs = None
    @property
    def coord_seq(self):
        "Returns a clone of the coordinate sequence for this Geometry."
        # Implicitly returns None for geometry types without a sequence.
        if self.has_cs:
            return self._cs.clone()
    #### Geometry Info ####
    @property
    def geom_type(self):
        "Returns a string representing the Geometry type, e.g. 'Polygon'"
        return capi.geos_type(self.ptr).decode()
    @property
    def geom_typeid(self):
        "Returns an integer representing the Geometry type."
        return capi.geos_typeid(self.ptr)
    @property
    def num_geom(self):
        "Returns the number of geometries in the Geometry."
        return capi.get_num_geoms(self.ptr)
    @property
    def num_coords(self):
        "Returns the number of coordinates in the Geometry."
        return capi.get_num_coords(self.ptr)
    @property
    def num_points(self):
        "Returns the number points, or coordinates, in the Geometry."
        # Alias of num_coords.
        return self.num_coords
    @property
    def dims(self):
        "Returns the dimension of this Geometry (0=point, 1=line, 2=surface)."
        return capi.get_dims(self.ptr)
    def normalize(self):
        "Converts this Geometry to normal form (or canonical form)."
        # In-place operation on the underlying GEOS geometry.
        return capi.geos_normalize(self.ptr)
    #### Unary predicates ####
    # Each predicate delegates to the corresponding GEOS C API call.
    @property
    def empty(self):
        """
        Returns a boolean indicating whether the set of points in this Geometry
        are empty.
        """
        return capi.geos_isempty(self.ptr)
    @property
    def hasz(self):
        "Returns whether the geometry has a 3D dimension."
        return capi.geos_hasz(self.ptr)
    @property
    def ring(self):
        "Returns whether or not the geometry is a ring."
        return capi.geos_isring(self.ptr)
    @property
    def simple(self):
        "Returns false if the Geometry not simple."
        return capi.geos_issimple(self.ptr)
    @property
    def valid(self):
        "This property tests the validity of this Geometry."
        return capi.geos_isvalid(self.ptr)
    @property
    def valid_reason(self):
        """
        Returns a string containing the reason for any invalidity.
        """
        return capi.geos_isvalidreason(self.ptr).decode()
    #### Binary predicates. ####
    # Each predicate delegates to the corresponding GEOS C API call,
    # passing the raw pointers of both geometries.
    def contains(self, other):
        "Returns true if other.within(this) returns true."
        return capi.geos_contains(self.ptr, other.ptr)
    def crosses(self, other):
        """
        Returns true if the DE-9IM intersection matrix for the two Geometries
        is T*T****** (for a point and a curve,a point and an area or a line and
        an area) 0******** (for two curves).
        """
        return capi.geos_crosses(self.ptr, other.ptr)
    def disjoint(self, other):
        """
        Returns true if the DE-9IM intersection matrix for the two Geometries
        is FF*FF****.
        """
        return capi.geos_disjoint(self.ptr, other.ptr)
    def equals(self, other):
        """
        Returns true if the DE-9IM intersection matrix for the two Geometries
        is T*F**FFF*.
        """
        return capi.geos_equals(self.ptr, other.ptr)
    def equals_exact(self, other, tolerance=0):
        """
        Returns true if the two Geometries are exactly equal, up to a
        specified tolerance.
        """
        return capi.geos_equalsexact(self.ptr, other.ptr, float(tolerance))
    def intersects(self, other):
        "Returns true if disjoint returns false."
        return capi.geos_intersects(self.ptr, other.ptr)
    def overlaps(self, other):
        """
        Returns true if the DE-9IM intersection matrix for the two Geometries
        is T*T***T** (for two points or two surfaces) 1*T***T** (for two curves).
        """
        return capi.geos_overlaps(self.ptr, other.ptr)
    def relate_pattern(self, other, pattern):
        """
        Returns true if the elements in the DE-9IM intersection matrix for the
        two Geometries match the elements in pattern.
        """
        # A DE-9IM pattern is at most 9 characters.
        if not isinstance(pattern, six.string_types) or len(pattern) > 9:
            raise GEOSException('invalid intersection matrix pattern')
        return capi.geos_relatepattern(self.ptr, other.ptr, force_bytes(pattern))
    def touches(self, other):
        """
        Returns true if the DE-9IM intersection matrix for the two Geometries
        is FT*******, F**T***** or F***T****.
        """
        return capi.geos_touches(self.ptr, other.ptr)
    def within(self, other):
        """
        Returns true if the DE-9IM intersection matrix for the two Geometries
        is T*F**F***.
        """
        return capi.geos_within(self.ptr, other.ptr)
    #### SRID Routines ####
    def get_srid(self):
        "Gets the SRID for the geometry, returns None if no SRID is set."
        s = capi.geos_get_srid(self.ptr)
        # GEOS reports 0 for "no SRID"; expose that as None.
        if s == 0:
            return None
        else:
            return s
    def set_srid(self, srid):
        "Sets the SRID for the geometry."
        capi.geos_set_srid(self.ptr, srid)
    # `srid` reads/writes the SRID stored on the underlying GEOS geometry.
    srid = property(get_srid, set_srid)
    #### Output Routines ####
    # The `3 if self.hasz else 2` arguments below select the output
    # dimensionality of the writer (3D when the geometry has Z values).
    @property
    def ewkt(self):
        """
        Returns the EWKT (SRID + WKT) of the Geometry. Note that Z values
        are only included in this representation if GEOS >= 3.3.0.
        """
        if self.get_srid():
            return 'SRID=%s;%s' % (self.srid, self.wkt)
        else:
            return self.wkt
    @property
    def wkt(self):
        "Returns the WKT (Well-Known Text) representation of this Geometry."
        return wkt_w(3 if self.hasz else 2).write(self).decode()
    @property
    def hex(self):
        """
        Returns the WKB of this Geometry in hexadecimal form.  Please note
        that the SRID is not included in this representation because it is not
        a part of the OGC specification (use the `hexewkb` property instead).
        """
        # A possible faster, all-python, implementation:
        #  str(self.wkb).encode('hex')
        return wkb_w(3 if self.hasz else 2).write_hex(self)
    @property
    def hexewkb(self):
        """
        Returns the EWKB of this Geometry in hexadecimal form.  This is an
        extension of the WKB specification that includes SRID value that are
        a part of this geometry.
        """
        return ewkb_w(3 if self.hasz else 2).write_hex(self)
    @property
    def json(self):
        """
        Returns GeoJSON representation of this Geometry if GDAL is installed.
        """
        if gdal.HAS_GDAL:
            return self.ogr.json
        else:
            raise GEOSException('GeoJSON output only supported when GDAL is installed.')
    # Alias for `json`.
    geojson = json
    @property
    def wkb(self):
        """
        Returns the WKB (Well-Known Binary) representation of this Geometry
        as a Python buffer.  SRID and Z values are not included, use the
        `ewkb` property instead.
        """
        return wkb_w(3 if self.hasz else 2).write(self)
    @property
    def ewkb(self):
        """
        Return the EWKB representation of this Geometry as a Python buffer.
        This is an extension of the WKB specification that includes any SRID
        value that are a part of this geometry.
        """
        return ewkb_w(3 if self.hasz else 2).write(self)
    @property
    def kml(self):
        "Returns the KML representation of this Geometry."
        gtype = self.geom_type
        return '<%s>%s</%s>' % (gtype, self.coord_seq.kml, gtype)
    @property
    def prepared(self):
        """
        Returns a PreparedGeometry corresponding to this geometry -- it is
        optimized for the contains, intersects, and covers operations.
        """
        return PreparedGeometry(self)
    #### GDAL-specific output routines ####
    @property
    def ogr(self):
        "Returns the OGR Geometry for this Geometry."
        if not gdal.HAS_GDAL:
            raise GEOSException('GDAL required to convert to an OGRGeometry.')
        if self.srid:
            try:
                return gdal.OGRGeometry(self.wkb, self.srid)
            except SRSException:
                # Unknown SRID: fall through and build without one.
                pass
        return gdal.OGRGeometry(self.wkb)
    @property
    def srs(self):
        "Returns the OSR SpatialReference for SRID of this Geometry."
        if not gdal.HAS_GDAL:
            raise GEOSException('GDAL required to return a SpatialReference object.')
        if self.srid:
            try:
                return gdal.SpatialReference(self.srid)
            except SRSException:
                pass
        return None
    @property
    def crs(self):
        "Alias for `srs` property."
        return self.srs
    def transform(self, ct, clone=False):
        """
        Requires GDAL. Transforms the geometry according to the given
        transformation object, which may be an integer SRID, and WKT or
        PROJ.4 string. By default, the geometry is transformed in-place and
        nothing is returned. However if the `clone` keyword is set, then this
        geometry will not be modified and a transformed clone will be returned
        instead.
        """
        srid = self.srid
        if ct == srid:
            # short-circuit where source & dest SRIDs match
            if clone:
                return self.clone()
            else:
                return
        if (srid is None) or (srid < 0):
            raise GEOSException("Calling transform() with no SRID set is not supported")
        if not gdal.HAS_GDAL:
            raise GEOSException("GDAL library is not available to transform() geometry.")
        # Creating an OGR Geometry, which is then transformed.
        g = self.ogr
        g.transform(ct)
        # Getting a new GEOS pointer
        ptr = wkb_r().read(g.wkb)
        if clone:
            # User wants a cloned transformed geometry returned.
            return GEOSGeometry(ptr, srid=g.srid)
        if ptr:
            # Reassigning pointer, and performing post-initialization setup
            # again due to the reassignment.
            capi.destroy_geom(self.ptr)
            self.ptr = ptr
            self._post_init(g.srid)
        else:
            raise GEOSException('Transformed WKB was invalid.')
#### Topology Routines ####
def _topology(self, gptr):
"Helper routine to return Geometry from the given pointer."
return GEOSGeometry(gptr, srid=self.srid)
@property
def boundary(self):
"Returns the boundary as a newly allocated Geometry object."
return self._topology(capi.geos_boundary(self.ptr))
def buffer(self, width, quadsegs=8):
"""
Returns a geometry that represents all points whose distance from this
Geometry is less than or equal to distance. Calculations are in the
Spatial Reference System of this Geometry. The optional third parameter sets
the number of segment used to approximate a quarter circle (defaults to 8).
(Text from PostGIS documentation at ch. 6.1.3)
"""
return self._topology(capi.geos_buffer(self.ptr, width, quadsegs))
@property
def centroid(self):
"""
The centroid is equal to the centroid of the set of component Geometries
of highest dimension (since the lower-dimension geometries contribute zero
"weight" to the centroid).
"""
return self._topology(capi.geos_centroid(self.ptr))
@property
def convex_hull(self):
"""
Returns the smallest convex Polygon that contains all the points
in the Geometry.
"""
return self._topology(capi.geos_convexhull(self.ptr))
def difference(self, other):
"""
Returns a Geometry representing the points making up this Geometry
that do not make up other.
"""
return self._topology(capi.geos_difference(self.ptr, other.ptr))
@property
def envelope(self):
"Return the envelope for this geometry (a polygon)."
return self._topology(capi.geos_envelope(self.ptr))
def interpolate(self, distance):
if not isinstance(self, (LineString, MultiLineString)):
raise TypeError('interpolate only works on LineString and MultiLineString geometries')
return self._topology(capi.geos_interpolate(self.ptr, distance))
def interpolate_normalized(self, distance):
if not isinstance(self, (LineString, MultiLineString)):
raise TypeError('interpolate only works on LineString and MultiLineString geometries')
return self._topology(capi.geos_interpolate_normalized(self.ptr, distance))
def intersection(self, other):
    "Returns a Geometry representing the points shared by this Geometry and other."
    return self._topology(capi.geos_intersection(self.ptr, other.ptr))
@property
def point_on_surface(self):
    "Computes an interior point of this Geometry (guaranteed to lie on it)."
    return self._topology(capi.geos_pointonsurface(self.ptr))
def project(self, point):
    """Return the distance from the start of this line to the location
    on the line closest to *point* (in the units of this geometry's
    coordinate system).

    Only valid for LineString/MultiLineString geometries; raises
    TypeError otherwise.
    """
    # Fixed: both messages previously said 'locate_point', which is not
    # the name of this method and made the errors confusing to callers.
    if not isinstance(point, Point):
        raise TypeError('project argument must be a Point')
    if not isinstance(self, (LineString, MultiLineString)):
        raise TypeError('project only works on LineString and MultiLineString geometries')
    return capi.geos_project(self.ptr, point.ptr)
def project_normalized(self, point):
    """Return the distance from the start of this line to the location
    on the line closest to *point*, normalized to the line's length
    (i.e. a value in [0, 1]).

    Only valid for LineString/MultiLineString geometries; raises
    TypeError otherwise.
    """
    # Fixed: both messages previously said 'locate_point', which is not
    # the name of this method and made the errors confusing to callers.
    if not isinstance(point, Point):
        raise TypeError('project_normalized argument must be a Point')
    if not isinstance(self, (LineString, MultiLineString)):
        raise TypeError('project_normalized only works on LineString and MultiLineString geometries')
    return capi.geos_project_normalized(self.ptr, point.ptr)
def relate(self, other):
    """Returns the DE-9IM intersection matrix (a 9-character string) for
    this Geometry and *other*."""
    return capi.geos_relate(self.ptr, other.ptr).decode()
def simplify(self, tolerance=0.0, preserve_topology=False):
    """
    Returns the Geometry, simplified using the Douglas-Peucker algorithm
    to the specified tolerance (higher tolerance => fewer points). If no
    tolerance is provided, defaults to 0.

    By default, this function does not preserve topology: e.g. polygons
    can be split or collapse to lines, holes can be created or
    disappear, and lines can cross. By specifying preserve_topology=True,
    the result will have the same dimension and number of components as
    the input. This is significantly slower.
    """
    if preserve_topology:
        return self._topology(capi.geos_preservesimplify(self.ptr, tolerance))
    else:
        return self._topology(capi.geos_simplify(self.ptr, tolerance))
def sym_difference(self, other):
    """
    Returns a set combining the points in this Geometry not in other,
    and the points in other not in this Geometry.
    """
    return self._topology(capi.geos_symdifference(self.ptr, other.ptr))
def union(self, other):
    "Returns a Geometry representing all the points in this Geometry and other."
    return self._topology(capi.geos_union(self.ptr, other.ptr))
#### Other Routines ####
@property
def area(self):
    "Returns the area of the Geometry."
    # c_double() is an out-parameter; the ctypes binding's errcheck
    # presumably unwraps it into a Python float -- same pattern as
    # distance() and length().  TODO confirm against the capi bindings.
    return capi.geos_area(self.ptr, byref(c_double()))
def distance(self, other):
    """
    Returns the distance between the closest points on this Geometry
    and the other. Units will be in those of the coordinate system of
    the Geometry.

    Raises:
        TypeError: if *other* is not a GEOSGeometry.
    """
    if not isinstance(other, GEOSGeometry):
        raise TypeError('distance() works only on other GEOS Geometries.')
    return capi.geos_distance(self.ptr, other.ptr, byref(c_double()))
@property
def extent(self):
    """Return this geometry's bounding box as a 4-tuple:
    (xmin, ymin, xmax, ymax)."""
    env = self.envelope
    if isinstance(env, Point):
        # Degenerate envelope (single point): min and max coincide.
        xmin, ymin = env.tuple
        return (xmin, ymin, xmin, ymin)
    # The envelope polygon's exterior ring holds the corners; the first
    # vertex is the lower-left corner, the third the upper-right.
    ring = env[0]
    (xmin, ymin), (xmax, ymax) = ring[0], ring[2]
    return (xmin, ymin, xmax, ymax)
@property
def length(self):
    """
    Returns the length of this Geometry (e.g., 0 for point, or the
    circumference of a Polygon).
    """
    return capi.geos_length(self.ptr, byref(c_double()))
def clone(self):
    "Clones this Geometry, returning a new GEOSGeometry with the same SRID."
    return GEOSGeometry(capi.geom_clone(self.ptr), srid=self.srid)
# Class mapping dictionary. Has to be at the end to avoid import
# conflicts with GEOSGeometry.
from django.contrib.gis.geos.linestring import LineString, LinearRing
from django.contrib.gis.geos.point import Point
from django.contrib.gis.geos.polygon import Polygon
from django.contrib.gis.geos.collections import GeometryCollection, MultiPoint, MultiLineString, MultiPolygon
from django.contrib.gis.geos.prepared import PreparedGeometry

# Maps a GEOS geometry type ID to the GEOSGeometry subclass that wraps
# geometries of that type.
GEOS_CLASSES = {
    0: Point,
    1: LineString,
    2: LinearRing,
    3: Polygon,
    4: MultiPoint,
    5: MultiLineString,
    6: MultiPolygon,
    7: GeometryCollection,
}
|
jm-begon/clustertools | refs/heads/master | continuous_integration/configure_slurm.py | 1 | # This file is taken from the Pulsar project
# (https://github.com/galaxyproject/pulsar) which is apache 2.0
#
from socket import gethostname
from string import Template
from subprocess import call
from getpass import getuser
# Minimal single-node slurm.conf; the $hostname and $user placeholders
# are filled in via string.Template.substitute() in main().
SLURM_CONFIG_TEMPLATE = '''
# slurm.conf file generated by configurator.html.
# Put this file on all nodes of your cluster.
# See the slurm.conf man page for more information.
#
ControlMachine=$hostname
#ControlAddr=
#BackupController=
#BackupAddr=
#
AuthType=auth/munge
CacheGroups=0
#CheckpointType=checkpoint/none
CryptoType=crypto/munge
MpiDefault=none
#PluginDir=
#PlugStackConfig=
#PrivateData=jobs
ProctrackType=proctrack/pgid
#Prolog=
#PrologSlurmctld=
#PropagatePrioProcess=0
#PropagateResourceLimits=
#PropagateResourceLimitsExcept=
ReturnToService=1
#SallocDefaultCommand=
SlurmctldPidFile=/var/run/slurmctld.pid
SlurmctldPort=6817
SlurmdPidFile=/var/run/slurmd.pid
SlurmdPort=6818
SlurmdSpoolDir=/tmp/slurmd
SlurmUser=$user
#SlurmdUser=root
#SrunEpilog=
#SrunProlog=
StateSaveLocation=/tmp
SwitchType=switch/none
#TaskEpilog=
TaskPlugin=task/none
#TaskPluginParam=
#TaskProlog=
InactiveLimit=0
KillWait=30
MinJobAge=300
#OverTimeLimit=0
SlurmctldTimeout=120
SlurmdTimeout=300
#UnkillableStepTimeout=60
#VSizeFactor=0
Waittime=0
FastSchedule=2
SchedulerType=sched/backfill
SchedulerPort=7321
SelectType=select/linear
#SelectTypeParameters=
AccountingStorageType=accounting_storage/none
#AccountingStorageUser=
AccountingStoreJobComment=YES
ClusterName=cluster
#DebugFlags=
#JobCompHost=
#JobCompLoc=
#JobCompPass=
#JobCompPort=
JobCompType=jobcomp/none
#JobCompUser=
JobAcctGatherFrequency=30
JobAcctGatherType=jobacct_gather/none
SlurmctldDebug=3
#SlurmctldLogFile=
SlurmdDebug=3
#SlurmdLogFile=
NodeName=$hostname CPUs=1 State=UNKNOWN RealMemory=5000
PartitionName=debug Nodes=$hostname Default=YES MaxTime=INFINITE State=UP
'''
def main():
    """Render slurm.conf for this host/user and start the SLURM daemons."""
    template_params = {"hostname": gethostname(), "user": getuser()}
    config_contents = Template(SLURM_CONFIG_TEMPLATE).substitute(template_params)
    # Use a context manager so the config file is flushed and closed
    # before the daemons (which read it) are launched; the original
    # leaked the file handle via a bare open().write().
    with open("/etc/slurm-llnl/slurm.conf", "w") as conf_file:
        conf_file.write(config_contents)
    call("slurmctld")
    call("slurmd")


if __name__ == "__main__":
    main()
mhaberler/machinekit | refs/heads/master | share/fdm/thermistor_tables/Marlin_converter/ConvertTable.py | 12 | #!/usr/bin/python
# encoding: utf-8
"""
ConvertTable.py
Created by Alexander Rössler on 2014-03-24.
"""
import argparse
import string
def v2R(value, r_pullup=4700.0, adc_max=1024.0):
    """Convert a raw ADC reading to a thermistor resistance in ohms.

    Assumes the thermistor forms a voltage divider against a pull-up
    resistor of *r_pullup* ohms read by an ADC of *adc_max* counts.
    The defaults (4.7k pull-up, 10-bit ADC) match the values that were
    previously hard-coded, so existing single-argument callers are
    unaffected.  Note: value == adc_max would divide by zero (an
    open-circuit reading).
    """
    return (r_pullup * value) / (adc_max - value)
parser = argparse.ArgumentParser(description='Converts a Marlin thermistor table to Machinekit thermistor table')
parser.add_argument('-i', '--input', help='input file', required=True)
parser.add_argument('-o', '--output', help='output file', required=True)

args = parser.parse_args()
inputFile = args.input
outputFile = args.output

with open(inputFile, "r") as f:
    with open(outputFile, "w") as out:
        out.write("# Autoconverted thermistor table for " + inputFile + "\n")
        # "aplha" typo in the generated header fixed to "alpha".
        out.write("# Temp\tResistance\talpha\n")
        for line in f:
            # Strip all whitespace and the C array braces, leaving
            # "raw*OVERSAMPLENR,temp".  str.replace is used instead of
            # string.replace(), which was removed in Python 3.
            line = ''.join(line.split())
            line = line.replace('{', '').replace('}', '')
            items = line.split(',')
            if len(items) < 2:
                # Skip blank/comment/malformed lines instead of crashing.
                continue
            resistance = v2R(float(items[0].split('*')[0]))
            temperature = float(items[1])
            alpha = 0.0
            out.write(str(temperature) + "\t" + '{0:.02f}'.format(resistance) + "\t" + str(alpha) + "\n")
jrrembert/django | refs/heads/master | tests/dates/models.py | 194 | from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Article(models.Model):
    """Test model: an article published on a date, in 0+ categories."""
    title = models.CharField(max_length=100)
    pub_date = models.DateField()
    # Reverse accessor on Category is `articles`.
    categories = models.ManyToManyField("Category", related_name="articles")

    def __str__(self):
        return self.title
@python_2_unicode_compatible
class Comment(models.Model):
    """Test model: a dated comment on an Article."""
    # Reverse accessor on Article is `comments`.
    article = models.ForeignKey(Article, models.CASCADE, related_name="comments")
    text = models.TextField()
    pub_date = models.DateField()
    # Nullable: remains NULL until/unless the comment is approved.
    approval_date = models.DateField(null=True)

    def __str__(self):
        return 'Comment to %s (%s)' % (self.article.title, self.pub_date)
class Category(models.Model):
    """Test model: a named category that articles can belong to."""
    name = models.CharField(max_length=255)
|
fireeye/flare-ida | refs/heads/master | python/flare/code_grafter.py | 1 | # -*- coding: utf-8 -*-
# Copyright (C) 2019 FireEye, Inc. All Rights Reserved.
"""Code grafting: Static linking of code into IDA Databases to aid emulation.
Code Grafting allows you to graft static implementations of imported
functions into your IDA Database (IDB) for purposes of emulation in Bochs IDB
mode or by other emulators.
Instructions for adding new code to the library of synthetic import function
implementations supported by Code Grafter:
1.) Build a binary with position-independent native code to replace your
functions of interest.
2.) Open your binary in IDA Pro and import/use `mykutils.emit_fnbytes_python()`
to obtain a Python-compatible hex string containing the function's opcodes.
3.) Add the resulting string to `code_grafter.g_fnbytes` (defined in
`code_grafter.py`).
4.) In the IDB for your sample, you can then import `code_grafter` and graft
code onto your IDB to support emulation of the function that you just added
to the function implementation library.
from flare import code_grafter
cg = code_grafter.CodeGrafter()
cg.graftCodeToIdb()
If you don't want to modify your copy of `code_grafter.py`, you may find it
possible to instead add your extracted function opcode strings to a separate
script, then import `code_grafter` and dynamically add the opcdoe strings to
`code_grafter.g_fnbytes` before using Code Grafter on your IDB.
"""
import idc
import ida_ua
import idaapi
import idautils
import ida_xref
import ida_bytes
import mykutils
from mykutils import phex
from seghelper import SegPlanner
import struct
import logging
import binascii
__author__ = 'Michael Bailey'
__copyright__ = 'Copyright (C) 2019 FireEye, Inc.'
__license__ = 'Apache License 2.0'
__version__ = '1.0'
logger = logging.getLogger()
logging.basicConfig(format='%(message)s', level=logging.INFO)
###############################################################################
# Constants
###############################################################################
METAPC = 'metapc'  # IDA processor module name covering x86/x64


###############################################################################
# Globals
###############################################################################

# Code Carving/Grafting

# Sentinel comment placed on grafted segments so they can be recognized
# (and later removed) by this tool.
g_seg_sig_code_grafter = 'Segment added by FLARE Code Grafter'

# Comment fragments used when annotating patched call sites.
g_patched_call_cmt = 'Patched for emulation, was:'
g_cmt_pointed = '(Pointed'

# Bitness -> IDB patch routine that writes a pointer-width value.
g_patch_pointer_width = {
    32: idc.patch_dword,
    64: idc.patch_qword,
}

# Per-architecture/bitness library of native function implementations. Each
# entry is a hexadecimal ASCII string of native opcodes compiled from C and
# specially crafted not to access globals or call other functions.
g_fnbytes = {
    METAPC: {
        32: {},
        64: {},
    }
}
g_fnbytes[METAPC][32]['memcpy'] = (
'55' # 0x1000: push ebp
'8BEC' # 0x1001: mov ebp, esp
'83EC08' # 0x1003: sub esp, 8
'8B4508' # 0x1006: mov eax, dword ptr [ebp + 8]
'8945F8' # 0x1009: mov dword ptr [ebp - 8], eax
'8B4D0C' # 0x100c: mov ecx, dword ptr [ebp + 0xc]
'894DFC' # 0x100f: mov dword ptr [ebp - 4], ecx
'8B5510' # 0x1012: mov edx, dword ptr [ebp + 0x10]
'8B4510' # 0x1015: mov eax, dword ptr [ebp + 0x10]
'83E801' # 0x1018: sub eax, 1
'894510' # 0x101b: mov dword ptr [ebp + 0x10], eax
'85D2' # 0x101e: test edx, edx
'741E' # 0x1020: je 0x1040
'8B4DF8' # 0x1022: mov ecx, dword ptr [ebp - 8]
'8B55FC' # 0x1025: mov edx, dword ptr [ebp - 4]
'8A02' # 0x1028: mov al, byte ptr [edx]
'8801' # 0x102a: mov byte ptr [ecx], al
'8B4DF8' # 0x102c: mov ecx, dword ptr [ebp - 8]
'83C101' # 0x102f: add ecx, 1
'894DF8' # 0x1032: mov dword ptr [ebp - 8], ecx
'8B55FC' # 0x1035: mov edx, dword ptr [ebp - 4]
'83C201' # 0x1038: add edx, 1
'8955FC' # 0x103b: mov dword ptr [ebp - 4], edx
'EBD2' # 0x103e: jmp 0x1012
'8B4508' # 0x1040: mov eax, dword ptr [ebp + 8]
'8BE5' # 0x1043: mov esp, ebp
'5D' # 0x1045: pop ebp
'C3' # 0x1046: ret
)
g_fnbytes[METAPC][64]['memcpy'] = (
'4C89442418' # 0x1000: mov qword ptr [rsp + 0x18], r8
'4889542410' # 0x1005: mov qword ptr [rsp + 0x10], rdx
'48894C2408' # 0x100a: mov qword ptr [rsp + 8], rcx
'4883EC18' # 0x100f: sub rsp, 0x18
'488B442420' # 0x1013: mov rax, qword ptr [rsp + 0x20]
'4889442408' # 0x1018: mov qword ptr [rsp + 8], rax
'488B442428' # 0x101d: mov rax, qword ptr [rsp + 0x28]
'48890424' # 0x1022: mov qword ptr [rsp], rax
'488B4C2430' # 0x1026: mov rcx, qword ptr [rsp + 0x30]
'488B442430' # 0x102b: mov rax, qword ptr [rsp + 0x30]
'4883E801' # 0x1030: sub rax, 1
'4889442430' # 0x1034: mov qword ptr [rsp + 0x30], rax
'4885C9' # 0x1039: test rcx, rcx
'742A' # 0x103c: je 0x1068
'488B4C2408' # 0x103e: mov rcx, qword ptr [rsp + 8]
'488B0424' # 0x1043: mov rax, qword ptr [rsp]
'0FB600' # 0x1047: movzx eax, byte ptr [rax]
'8801' # 0x104a: mov byte ptr [rcx], al
'488B442408' # 0x104c: mov rax, qword ptr [rsp + 8]
'4883C001' # 0x1051: add rax, 1
'4889442408' # 0x1055: mov qword ptr [rsp + 8], rax
'488B0424' # 0x105a: mov rax, qword ptr [rsp]
'4883C001' # 0x105e: add rax, 1
'48890424' # 0x1062: mov qword ptr [rsp], rax
'EBBE' # 0x1066: jmp 0x1026
'488B442420' # 0x1068: mov rax, qword ptr [rsp + 0x20]
'4883C418' # 0x106d: add rsp, 0x18
'C3' # 0x1071: ret
)
g_fnbytes[METAPC][32]['memset'] = (
'55' # 0x1000: push ebp
'8BEC' # 0x1001: mov ebp, esp
'51' # 0x1003: push ecx
'8B4508' # 0x1004: mov eax, dword ptr [ebp + 8]
'8945FC' # 0x1007: mov dword ptr [ebp - 4], eax
'8B4D10' # 0x100a: mov ecx, dword ptr [ebp + 0x10]
'8B5510' # 0x100d: mov edx, dword ptr [ebp + 0x10]
'83EA01' # 0x1010: sub edx, 1
'895510' # 0x1013: mov dword ptr [ebp + 0x10], edx
'85C9' # 0x1016: test ecx, ecx
'7413' # 0x1018: je 0x102d
'8B45FC' # 0x101a: mov eax, dword ptr [ebp - 4]
'8A4D0C' # 0x101d: mov cl, byte ptr [ebp + 0xc]
'8808' # 0x1020: mov byte ptr [eax], cl
'8B55FC' # 0x1022: mov edx, dword ptr [ebp - 4]
'83C201' # 0x1025: add edx, 1
'8955FC' # 0x1028: mov dword ptr [ebp - 4], edx
'EBDD' # 0x102b: jmp 0x100a
'8B4508' # 0x102d: mov eax, dword ptr [ebp + 8]
'8BE5' # 0x1030: mov esp, ebp
'5D' # 0x1032: pop ebp
'C3' # 0x1033: ret
)
g_fnbytes[METAPC][64]['memset'] = (
'4C89442418' # 0x1000: mov qword ptr [rsp + 0x18], r8
'89542410' # 0x1005: mov dword ptr [rsp + 0x10], edx
'48894C2408' # 0x1009: mov qword ptr [rsp + 8], rcx
'4883EC18' # 0x100e: sub rsp, 0x18
'488B442420' # 0x1012: mov rax, qword ptr [rsp + 0x20]
'48890424' # 0x1017: mov qword ptr [rsp], rax
'488B4C2430' # 0x101b: mov rcx, qword ptr [rsp + 0x30]
'488B442430' # 0x1020: mov rax, qword ptr [rsp + 0x30]
'4883E801' # 0x1025: sub rax, 1
'4889442430' # 0x1029: mov qword ptr [rsp + 0x30], rax
'4885C9' # 0x102e: test rcx, rcx
'7419' # 0x1031: je 0x104c
'488B0C24' # 0x1033: mov rcx, qword ptr [rsp]
'0FB6442428' # 0x1037: movzx eax, byte ptr [rsp + 0x28]
'8801' # 0x103c: mov byte ptr [rcx], al
'488B0424' # 0x103e: mov rax, qword ptr [rsp]
'4883C001' # 0x1042: add rax, 1
'48890424' # 0x1046: mov qword ptr [rsp], rax
'EBCF' # 0x104a: jmp 0x101b
'488B442420' # 0x104c: mov rax, qword ptr [rsp + 0x20]
'4883C418' # 0x1051: add rsp, 0x18
'C3' # 0x1055: ret
)
g_fnbytes[METAPC][32]['strcpy'] = (
'55' # 0x1000: push ebp
'8BEC' # 0x1001: mov ebp, esp
'51' # 0x1003: push ecx
'8B4508' # 0x1004: mov eax, dword ptr [ebp + 8]
'8945FC' # 0x1007: mov dword ptr [ebp - 4], eax
'8B4DFC' # 0x100a: mov ecx, dword ptr [ebp - 4]
'8B550C' # 0x100d: mov edx, dword ptr [ebp + 0xc]
'8A02' # 0x1010: mov al, byte ptr [edx]
'8801' # 0x1012: mov byte ptr [ecx], al
'8B4DFC' # 0x1014: mov ecx, dword ptr [ebp - 4]
'0FBE11' # 0x1017: movsx edx, byte ptr [ecx]
'8B45FC' # 0x101a: mov eax, dword ptr [ebp - 4]
'83C001' # 0x101d: add eax, 1
'8945FC' # 0x1020: mov dword ptr [ebp - 4], eax
'8B4D0C' # 0x1023: mov ecx, dword ptr [ebp + 0xc]
'83C101' # 0x1026: add ecx, 1
'894D0C' # 0x1029: mov dword ptr [ebp + 0xc], ecx
'85D2' # 0x102c: test edx, edx
'7402' # 0x102e: je 0x1032
'EBD8' # 0x1030: jmp 0x100a
'8B4508' # 0x1032: mov eax, dword ptr [ebp + 8]
'8BE5' # 0x1035: mov esp, ebp
'5D' # 0x1037: pop ebp
'C3' # 0x1038: ret
)
g_fnbytes[METAPC][64]['strcpy'] = (
'4889542410' # 0x1000: mov qword ptr [rsp + 0x10], rdx
'48894C2408' # 0x1005: mov qword ptr [rsp + 8], rcx
'4883EC18' # 0x100a: sub rsp, 0x18
'488B442420' # 0x100e: mov rax, qword ptr [rsp + 0x20]
'48890424' # 0x1013: mov qword ptr [rsp], rax
'488B0C24' # 0x1017: mov rcx, qword ptr [rsp]
'488B442428' # 0x101b: mov rax, qword ptr [rsp + 0x28]
'0FB600' # 0x1020: movzx eax, byte ptr [rax]
'8801' # 0x1023: mov byte ptr [rcx], al
'488B0424' # 0x1025: mov rax, qword ptr [rsp]
'0FBE08' # 0x1029: movsx ecx, byte ptr [rax]
'488B0424' # 0x102c: mov rax, qword ptr [rsp]
'4883C001' # 0x1030: add rax, 1
'48890424' # 0x1034: mov qword ptr [rsp], rax
'488B442428' # 0x1038: mov rax, qword ptr [rsp + 0x28]
'4883C001' # 0x103d: add rax, 1
'4889442428' # 0x1041: mov qword ptr [rsp + 0x28], rax
'85C9' # 0x1046: test ecx, ecx
'7402' # 0x1048: je 0x104c
'EBCB' # 0x104a: jmp 0x1017
'488B442420' # 0x104c: mov rax, qword ptr [rsp + 0x20]
'4883C418' # 0x1051: add rsp, 0x18
'C3' # 0x1055: ret
)
g_strlen_metapc_32bit = (
'55' # 0x1000: push ebp
'8BEC' # 0x1001: mov ebp, esp
'51' # 0x1003: push ecx
'C745FC00000000' # 0x1004: mov dword ptr [ebp - 4], 0
'8B4508' # 0x100b: mov eax, dword ptr [ebp + 8]
'0FBE08' # 0x100e: movsx ecx, byte ptr [eax]
'8B5508' # 0x1011: mov edx, dword ptr [ebp + 8]
'83C201' # 0x1014: add edx, 1
'895508' # 0x1017: mov dword ptr [ebp + 8], edx
'85C9' # 0x101a: test ecx, ecx
'740B' # 0x101c: je 0x1029
'8B45FC' # 0x101e: mov eax, dword ptr [ebp - 4]
'83C001' # 0x1021: add eax, 1
'8945FC' # 0x1024: mov dword ptr [ebp - 4], eax
'EBE2' # 0x1027: jmp 0x100b
'8B45FC' # 0x1029: mov eax, dword ptr [ebp - 4]
'8BE5' # 0x102c: mov esp, ebp
'5D' # 0x102e: pop ebp
'C3' # 0x102f: ret
)
g_strlen_metapc_64bit = (
'48894C2408' # 0x1000: mov qword ptr [rsp + 8], rcx
'4883EC18' # 0x1005: sub rsp, 0x18
'48C7042400000000' # 0x1009: mov qword ptr [rsp], 0
'488B442420' # 0x1011: mov rax, qword ptr [rsp + 0x20]
'0FBE08' # 0x1016: movsx ecx, byte ptr [rax]
'488B442420' # 0x1019: mov rax, qword ptr [rsp + 0x20]
'4883C001' # 0x101e: add rax, 1
'4889442420' # 0x1022: mov qword ptr [rsp + 0x20], rax
'85C9' # 0x1027: test ecx, ecx
'740E' # 0x1029: je 0x1039
'488B0424' # 0x102b: mov rax, qword ptr [rsp]
'4883C001' # 0x102f: add rax, 1
'48890424' # 0x1033: mov qword ptr [rsp], rax
'EBD8' # 0x1037: jmp 0x1011
'488B0424' # 0x1039: mov rax, qword ptr [rsp]
'4883C418' # 0x103d: add rsp, 0x18
'C3' # 0x1041: ret
)
# Covers lstrlenA
g_fnbytes[METAPC][32]['strlen'] = g_strlen_metapc_32bit
g_fnbytes[METAPC][64]['strlen'] = g_strlen_metapc_64bit
# return "en-US";
g_fnbytes[METAPC][32]['setlocale'] = (
'E800000000' # 0x1000: call 0x1005
'58' # 0x1005: pop eax
'83C007' # 0x1006: add eax, 7
'C20800' # 0x1009: ret 8
'656e2d555300' # db 'en-US',0
)
g_fnbytes[METAPC][64]['setlocale'] = (
'488B0501000000' # 0x1000: mov rax, qword ptr [rip + 1]
'C3' # 0x1007: ret
'656e2d555300' # db 'en-US',0
)
g_fnbytes[METAPC][32]['wsetlocale'] = (
'E800000000' # 0x1000: call 0x1005
'58' # 0x1005: pop eax
'83C007' # 0x1006: add eax, 7
'C20800' # 0x1009: ret 8
'65006e002d00550053000000' # text "UTF-16LE", 'en-US',0
)
g_fnbytes[METAPC][64]['wsetlocale'] = (
'488B0501000000' # 0x1000: mov rax, qword ptr [rip + 1]
'C3' # 0x1007: ret
'65006e002d00550053000000' # text "UTF-16LE", 'en-US',0
)
g_retn0_metapc_64bit = (
'4831C0' # 0x1000: xor rax, rax
'C3' # 0x1003: ret
)
g_retn1_metapc_64bit = (
'4831C0' # 0x1000: xor rax, rax
'4883C001' # 0x1003: add rax, 1
'C3' # 0x1007: ret
)
g_fnbytes[METAPC][32]['retn0'] = (
'31C0' # 0x1000: xor eax, eax
'C3' # 0x1002: ret
)
g_fnbytes[METAPC][64]['retn0'] = g_retn0_metapc_64bit
g_fnbytes[METAPC][32]['retn0_1arg'] = (
'31C0' # 0x1000: xor eax, eax
'C20400' # 0x1002: ret 4
)
g_fnbytes[METAPC][64]['retn0_1arg'] = g_retn0_metapc_64bit
g_fnbytes[METAPC][32]['retn0_3arg'] = (
'31C0' # 0x1000: xor eax, eax
'C20C00' # 0x1002: ret 0xc
)
g_fnbytes[METAPC][64]['retn0_3arg'] = g_retn0_metapc_64bit
g_fnbytes[METAPC][32]['retn1'] = (
'31C0' # 0x1000: xor eax, eax
'40' # 0x1002: inc eax
'C3' # 0x1003: ret
)
g_fnbytes[METAPC][64]['retn1'] = g_retn1_metapc_64bit
g_fnbytes[METAPC][32]['retn1_1arg'] = (
'31C0' # 0x1000: xor eax, eax
'40' # 0x1002: inc eax
'C20400' # 0x1003: ret 4
)
g_fnbytes[METAPC][64]['retn1_1arg'] = g_retn1_metapc_64bit
g_fnbytes[METAPC][32]['retn1_2arg'] = (
'31C0' # 0x1000: xor eax, eax
'40' # 0x1002: inc eax
'C20800' # 0x1003: ret 8
)
g_fnbytes[METAPC][64]['retn1_2arg'] = g_retn1_metapc_64bit
g_fnbytes[METAPC][32]['retn1_6arg'] = (
'31C0' # 0x1000: xor eax, eax
'40' # 0x1002: inc eax
'C21800' # 0x1003: ret 0x18
)
g_fnbytes[METAPC][64]['retn1_6arg'] = g_retn1_metapc_64bit
# Allocator => All names it is known by.  '??2@YAPAXI@Z' is presumably
# the MSVC-mangled C++ `operator new` -- TODO confirm.
g_allocators_aliases = {
    'malloc': ('??2@YAPAXI@Z', '_malloc', 'malloc',),
    'HeapAlloc': ('HeapAlloc',),
    'VirtualAlloc': ('VirtualAlloc',),
}

# Memory allocation templates cannot be handled as simply as static functions
# can be, because they must access global data. Furthermore, these are all
# basically the same allocator but they have different signatures.
g_fnbytes_allocators = {
    METAPC: {
        32: {},
        64: {},
    }
}

# Main differences among allocator implementations:
#   * [ebp+size] is different per argument layout
#   * cdecl (malloc) and various stdcall (HeapAlloc/VirtualAlloc) return
#     opcodes
# Violating D.R.Y. to make it clear how to add these, make them work, and
# maintain them.
g_fnbytes_allocators[METAPC][32]['malloc'] = (
'55' # push ebp
'8bec' # mov ebp, esp
'51' # push ecx
'a1{next_}' # mov eax, _next
'05{arena}' # add eax, offset _arena
'8945fc' # mov [ebp+ret], eax
'8b4d08' # mov ecx, [ebp+size]
'8b15{next_}' # mov edx, _next
'8d440aff' # lea eax, [edx+ecx-1]
'0dff0f0000' # or eax, 0FFFh
'83c001' # add eax, 1
'a3{next_}' # mov _next, eax
'8b45fc' # mov eax, [ebp+ret]
'8be5' # mov esp, ebp
'5d' # pop ebp
'c3' # retn
)
g_fnbytes_allocators[METAPC][64]['malloc'] = (
'48894c2408' # mov [rsp+arg_0], rcx
'4883ec18' # sub rsp, 18h
# '488d0570cd0100' # lea rax, arena ; Original, RIP-relative
'48B8{arena}' # mov rax, &cs:arena ; Hand-written, absolute64
# '48030529bf0100' # add rax, cs:next ; Original, RIP-relative
'48B9{next_}' # mov rcx, &cs:next_ ; Hand-written, absolute64
'480301' # add rax, [rcx] ; Hand-written
'48890424' # mov [rsp+18h+var_18], rax
# '488b0d1ebf0100' # mov rcx, cs:next ; Original, RIP-relative
'488b09' # mov rcx, [rcx] ; Hand-written, deref &next
'488b442420' # mov rax, [rsp+18h+arg_0]
'488d4401ff' # lea rax, [rcx+rax-1]
'480dff0f0000' # or rax, 0FFFh
'4883c001' # add rax, 1
# '48890503bf0100' # mov cs:next, rax ; Original, RIP-relative
'48B9{next_}' # mov rcx, &cs:next_ ; Hand-written, absolute
'488901' # mov [rcx], rax
'488b0424' # mov rax, [rsp+18h+var_18]
'4883c418' # add rsp, 18h
'c3' # retn
)
g_fnbytes_allocators[METAPC][32]['HeapAlloc'] = (
'55' # push ebp
'8bec' # mov ebp, esp
'51' # push ecx
'a1{next_}' # mov eax, _next
'05{arena}' # add eax, offset _arena
'8945fc' # mov [ebp+ret], eax
'8b4d10' # mov ecx, [ebp+size]
'8b15{next_}' # mov edx, _next
'8d440aff' # lea eax, [edx+ecx-1]
'0dff0f0000' # or eax, 0FFFh
'83c001' # add eax, 1
'a3{next_}' # mov _next, eax
'8b45fc' # mov eax, [ebp+ret]
'8be5' # mov esp, ebp
'5d' # pop ebp
'c20c00' # retn 0Ch
)
g_fnbytes_allocators[METAPC][64]['HeapAlloc'] = (
'4c89442418' # mov [rsp+arg_10], r8
'89542410' # mov [rsp+arg_8], edx
'48894c2408' # mov [rsp+arg_0], rcx
'4883ec18' # sub rsp, 18h
# '488d0517cd0100' # lea rax, arena ; Original, RIP-relative
'48B8{arena}' # mov rax, &cs:arena ; Hand-written, absolute64
# '480305d0be0100' # add rax, cs:next ; Original, RIP-relative
'48B9{next_}' # mov rcx, &cs:next_ ; Hand-written, absolute64
'480301' # add rax, [rcx] ; Hand-written
'48890424' # mov [rsp+18h+var_18], rax
# '488b0dc5be0100' # mov rcx, cs:next ; Original, RIP-relative
'488b09' # mov rcx, [rcx] ; Hand-written, deref &next
'488b442430' # mov rax, [rsp+18h+arg_10]
'488d4401ff' # lea rax, [rcx+rax-1]
'480dff0f0000' # or rax, 0FFFh
'4883c001' # add rax, 1
# '488905aabe0100' # mov cs:next, rax ; Original, RIP-relative
'48B9{next_}' # mov rcx, &cs:next_ ; Hand-written, absolute
'488901' # mov [rcx], rax
'488b0424' # mov rax, [rsp+18h+var_18]
'4883c418' # add rsp, 18h
'c3' # retn
)
g_fnbytes_allocators[METAPC][32]['VirtualAlloc'] = (
'55' # push ebp
'8bec' # mov ebp, esp
'51' # push ecx
'a1{next_}' # mov eax, _next
'05{arena}' # add eax, offset _arena
'8945fc' # mov [ebp+ret], eax
'8b4d0c' # mov ecx, [ebp+dwSize]
'8b15{next_}' # mov edx, _next
'8d440aff' # lea eax, [edx+ecx-1]
'0dff0f0000' # or eax, 0FFFh
'83c001' # add eax, 1
'a3{next_}' # mov _next, eax
'8b45fc' # mov eax, [ebp+ret]
'8be5' # mov esp, ebp
'5d' # pop ebp
'c21000' # retn 10h
)
g_fnbytes_allocators[METAPC][64]['VirtualAlloc'] = (
'44894c2420' # mov [rsp+arg_18], r9d
'4489442418' # mov [rsp+arg_10], r8d
'4889542410' # mov [rsp+arg_8], rdx
'48894c2408' # mov [rsp+arg_0], rcx
'4883ec18' # sub rsp, 18h
# '488d05b1cc0100' # lea rax, arena ; Original, RIP-relative
'48B8{arena}' # mov rax, &cs:arena ; Hand-written, absolute64
# '4803056abe0100' # add rax, cs:next ; Original, RIP-relative
'48B9{next_}' # mov rcx, &cs:next_ ; Hand-written, absolute64
'480301' # add rax, [rcx] ; Hand-written
'48890424' # mov [rsp+18h+var_18], rax
# '488b0d5fbe0100' # mov rcx, cs:next ; Original, RIP-relative
'488b09' # mov rcx, [rcx] ; Hand-written, deref &next
'488b442428' # mov rax, [rsp+18h+arg_8]
'488d4401ff' # lea rax, [rcx+rax-1]
'480dff0f0000' # or rax, 0FFFh
'4883c001' # add rax, 1
# '48890544be0100' # mov cs:next, rax ; Original, RIP-relative
'48B9{next_}' # mov rcx, &cs:next_ ; Hand-written, absolute
'488901' # mov [rcx], rax
'488b0424' # mov rax, [rsp+18h+var_18]
'4883c418' # add rsp, 18h
'c3' # retn
)
###############################################################################
# Code Grafting i.e. function injection
###############################################################################
# Why:
# For statically linking stand-in functions to aid emulation under e.g. Bochs.
class CodeGraftingUnsupportedFunc(Exception):
    """The requested function has no stub in the opcode library."""
    pass


class CodeGraftingUnsupportedArch(Exception):
    """The opcode library has no entries for this CPU/bitness."""
    pass


class CodeGraftingDisplacementError(Exception):
    """A stub lies farther from a call site than a 32-bit displacement allows."""
    pass


class CodeGraftingAlreadyPresent(Exception):
    """Grafted code segments already exist in this IDB."""
    pass
class CodeGrafter():
"""Graft code into IDA database to allow emulation of functions that call
certain imports and memory allocators.
To use:
1. Instantiate a `CodeGrafter`
2. Call the `graftCodeToIdb()` method
"""
def __init__(self, cpu=None, bits=None):
    """Capture the target CPU and bitness (defaulting to the open IDB's)
    and build the import-name -> stub-opcode lookup table."""
    self.cpu = cpu or idaapi.get_inf_structure().procname
    self.bits = bits or mykutils.get_bitness()

    # Keys are tuples of import-name aliases; every alias in a tuple is
    # patched to point at the same grafted stub implementation.
    self._emu_stubs = {
        ('IsDebuggerPresent',): self.get_fnbytes('retn0'),
        ('InitializeCriticalSection', 'EnterCriticalSection',
         'LeaveCriticalSection', 'DeleteCriticalSection',
         'EncodePointer', 'DecodePointer'):
            self.get_fnbytes('retn0_1arg'),
        ('FlsSetValue', '___acrt_FlsSetValue@8'):
            self.get_fnbytes('retn1_2arg'),
        ('FlsGetValue', '___acrt_FlsGetValue@4'):
            self.get_fnbytes('retn1_1arg'),
        ('setlocale', '_setlocale', '__setlocale'):
            self.get_fnbytes('setlocale'),
        ('wsetlocale', '_wsetlocale', '__wsetlocale',):
            self.get_fnbytes('wsetlocale'),
        ('GetLastError',): self.get_fnbytes('retn0'),
        ('SetLastError',): self.get_fnbytes('retn0_1arg'),
        ('CreateThread',): self.get_fnbytes('retn1_6arg'),
        ('free', '_free', '??3@YAXPAX@Z'): self.get_fnbytes('retn0'),
        ('HeapFree',): self.get_fnbytes('retn0_3arg'),
        ('strcpy', '_strcpy'): self.get_fnbytes('strcpy'),
        ('strlen',): self.get_fnbytes('strlen'),
        ('lstrlenA',): self.get_fnbytes('strlen'),
        ('memcpy', '_memcpy'): self.get_fnbytes('memcpy'),
        ('memset', '_memset'): self.get_fnbytes('memset'),
    }
def _stubname(self, s):
return 'stub_%s' % (s)
def get_fnbytes(self, fname):
    """Return the stub opcode hex string for *fname* on this CPU/bitness."""
    return self._lookup_bytes(g_fnbytes, fname)
def get_fnbytes_allocator(self, fname):
    """Return the allocator stub template (with {next_}/{arena} holes)
    for *fname* on this CPU/bitness."""
    return self._lookup_bytes(g_fnbytes_allocators, fname)
def _lookup_bytes(self, table, fname):
try:
return table[self.cpu][self.bits][fname]
except LookupError as e:
if str(e)[1:-1] == fname:
raise CodeGraftingUnsupportedFunc('Function %s not supported' %
(fname))
else:
raise CodeGraftingUnsupportedArch('%s-bit %s not supported' %
(self.bits, self.cpu))
def graftCodeToIdb(self, mem=0x4000000):
    """Add segments, inject stub functions, and patch calls within IDB to
    point to the stubs.

    Args:
        mem (numbers.Integral): Size of segment to use for memory
            allocators. The more generous, the less likelihood of running
            out of memory due to successive calls to e.g. malloc and
            crashing your emulator, Bochs debug session, etc. Unless or
            until someone is brave enough to implement a stub malloc
            implementation that supports free(), this unfortunately has to
            be pretty sizeable in practice.

    Raises:
        CodeGraftingAlreadyPresent: if grafted segments already exist.
    """
    if self._findGraftedSegments():
        msg = ('Found grafted code segments; use '
               'removeGraftedCodeFromIdb() first if you want to graft '
               'anew')
        raise CodeGraftingAlreadyPresent(msg)

    self._addSegments(mem)
    return self._patchCalls()
def removeGraftedCodeFromIdb(self):
    """Undo graftCodeToIdb(): restore patched call sites, then delete
    every segment the grafter created."""
    grafted = self._findGraftedSegments()
    if grafted:
        self._unpatchCalls(grafted)

    for seg in grafted:
        idc.del_segm(seg.start, idc.SEGMOD_KILL)
def _findGraftedSegments(self):
    """Return the segments whose start-address comment carries the
    grafter's signature, i.e. segments this tool previously created."""
    return [s for s in SegPlanner() if
            idc.get_cmt(s.start, 1) == g_seg_sig_code_grafter]
def _addSegments(self, mem=0x4000000):
    """Create emulation stub segments.

    Adds one small code segment holding the grafted stub functions and
    one large "arena" segment backing the fake memory allocators.
    Includes generous memory allocation space by default.

    Args:
        mem (numbers.Integral): size in bytes of the allocator arena.
    """
    arena_seg_size = mem
    code_seg_size = 0x1000

    use32 = 1
    fmt_ptr_width = '<I'
    if self.bits == 64:
        use32 = 2
        fmt_ptr_width = '<Q'

    def le_hex(n):  # Little-endian pointer-width hex string
        # .decode() fixes Python 3, where hexlify() returns bytes: a
        # bytes object rendered through str.format() would embed
        # "b'...'" in the opcode template and corrupt the stub.  On
        # Python 2, str.decode('ascii') is a harmless no-op here.
        return binascii.hexlify(struct.pack(fmt_ptr_width, n)).decode('ascii')

    seg_plan = SegPlanner()

    # Pick locations for the code and the malloc "arena"
    #
    # Note 1: Find space for stub code segment first, making use of
    # SegPlanner's bias in planning new segments close to other segments if
    # possible. This violates the Franklin-Covey principle of fitting big
    # stones in first before smaller stones create spatial fragmentation in
    # the jar. But in a 64-bit IDB, depending on the size of the malloc
    # arena, doing this in the opposite order could increase the chance of
    # a stub function residing at a distance from its callsite that cannot
    # be represented in 32 bits.
    #
    # Note 2: SegPlanner ensures segments won't start at 0, which otherwise
    # could result in a NULL return from an allocator stub like malloc
    # erroneously signifying failure.
    code = seg_plan.addSegAnywhere(code_seg_size)
    arena = seg_plan.addSegAnywhere(arena_seg_size)

    for seg in (code, arena):
        idc.AddSeg(seg.start, seg.end, 0, use32, 0, idc.scPub)
        idc.set_cmt(seg.start, g_seg_sig_code_grafter, 1)

    # Designate location for the malloc "arena"
    va_arena = arena.start

    # Designate location for and write the malloc next-index pointer.
    #
    # Placing this before our fake heap would misalign the first allocation
    # or waste space when the allocator skips bytes to compensate for
    # alignment.
    #
    # Placing it at the end of our fake heap would risk corrupting it in
    # the event of a buffer overrun (or heap overrun).
    #
    # Assuming 64-bit to provide enough space irrespective of architecture
    va_malloc_next = code.start
    idc.patch_qword(va_malloc_next, 0)
    idc.create_qword(va_malloc_next)
    mykutils.makename_safe(va_malloc_next, self._stubname('malloc_next'))

    va_next_code = code.start + 0x10

    def next_addr_align4(base, sc):
        # Floor division fixes Python 3, where `/` would produce a float
        # virtual address that then propagates into the patch/name calls
        # (identical result to Py2's integer `/`).
        return mykutils.align(base + (len(sc) // 2), 4)

    def add_stub_func(va, sc, nm):
        idaapi.patch_bytes(va, binascii.unhexlify(sc))
        idc.create_insn(va)
        idc.add_func(va)
        mykutils.makename_safe(va, self._stubname(nm))
        cmt = ('%s implementation generated by FLARE Code Grafter' %
               (nm))
        idc.set_cmt(va, cmt, 1)

    # Allocators are handled specially because their templates must be
    # filled with addresses for the global data they access.  (Renamed
    # the loop variable so it no longer shadows the `code` segment.)
    for allocator_name in g_allocators_aliases:
        stub_hex = self.get_fnbytes_allocator(allocator_name).format(
            next_=le_hex(va_malloc_next),
            arena=le_hex(va_arena)
        )
        add_stub_func(va_next_code, stub_hex, allocator_name)
        va_next_code = next_addr_align4(va_next_code, stub_hex)

    # Functions not referencing data or other code are simpler:
    for names, sc in self._emu_stubs.items():
        for nm in names:
            add_stub_func(va_next_code, sc, nm)
            va_next_code = next_addr_align4(va_next_code, sc)
    def _get_imp_for_register_call(self, va_call, nm=None):
        """Resolve the import pointer that feeds a register-indirect call.

        Starting from ``va_call`` (which must be a ``call <reg>``
        instruction), search upward for the ``mov <reg>, <imm>`` that loaded
        the register and return the immediate operand (the address of the
        import pointer).

        Args:
            va_call: Virtual address of a ``call`` instruction.
            nm: Optional name; if given, the ``mov`` source operand text must
                contain it, otherwise None is returned.

        Returns:
            Virtual address of the import pointer, or None if no defining
            ``mov`` was found (or its operand does not mention ``nm``).

        Raises:
            ValueError: If the instruction at ``va_call`` is not a call.
        """
        if idc.print_insn_mnem(va_call) != 'call':
            msg = 'va_call must be the virtual address of a call instruction'
            raise ValueError(msg)
        # Register used by the indirect call, e.g. 'esi' in "call esi"
        reg = idc.print_operand(va_call, 0)
        # Nearest preceding "mov <reg>, <anything>" that defines the register
        va_mov = mykutils.find_instr(va_call, 'up', 'mov',
                                     [(0, 1, reg), (1, 2, None)])
        if not va_mov:
            return None
        if nm and (nm not in idc.print_operand(va_mov, 1)):
            return None
        va_imp = idc.get_operand_value(va_mov, 1)
        return va_imp
    def _patchCalls(self):
        """Patch all call sites of emulated/allocator functions to hit stubs.

        For each function name in ``self._emu_stubs`` and each allocator
        alias, visit every call site and redirect it to the corresponding
        ``stub_<name>`` function, annotating the site with a comment and a
        code cross-reference.
        """
        def do_patch_call(va):
            # NOTE: this closure reads `nm` from the enclosing loops below;
            # it is rebound on each iteration before for_each_call_to runs.
            retval = False
            stub_loc = idc.get_name_ea_simple(self._stubname(nm))
            # Preserve original disassembly and format new comment
            old_target = idc.print_operand(va, 0)
            orig_cmt = idc.get_cmt(va, 0) or ''
            new_cmt = '%s\n\t%s' % (g_patched_call_cmt, idc.GetDisasm(va))
            if idc.get_operand_type(va, 0) == ida_ua.o_mem:
                # Memory-indirect call (call [imp]): repoint the import entry
                retval = patch_import(va, self._stubname(nm))
                new_cmt += '\n%s %s to %s)' % (g_cmt_pointed, old_target,
                                               self._stubname(nm))
            elif idc.get_operand_type(va, 0) == ida_ua.o_reg:
                # Register-indirect call: repoint the pointer the register
                # was loaded from, if it can be located
                va_imp = self._get_imp_for_register_call(va, nm)
                if va_imp:
                    patch_pointer_width(va_imp, stub_loc)
                    retval = True
                else:
                    logger.warn('Could not find import to patch call at %s' %
                                (phex(va)))
            else:  # Usually optype 7 otherwise
                # Direct call: re-assemble in place with the stub as target.
                # Won't work if displacement exceeds 32-bit operand size
                call_offset_loc = va + idc.get_item_size(va)
                if abs(call_offset_loc - stub_loc) > 0x100000000:
                    msg = ('Call site at %s too far from %s (%s)' %
                           (phex(va), self._stubname(nm), phex(stub_loc)))
                    raise CodeGraftingDisplacementError(msg)
                retval = patch_call(va, self._stubname(nm))
            if retval:
                if orig_cmt:
                    new_cmt += '\n%s' % (orig_cmt)
                idc.set_cmt(va, new_cmt, 0)
                ida_xref.add_cref(va, stub_loc, ida_xref.fl_CN)
            return retval

        for names in self._emu_stubs.keys():
            for nm in names:
                va = idc.get_name_ea_simple(nm)
                mykutils.for_each_call_to(do_patch_call, va)

        for nm, aliases in g_allocators_aliases.items():
            for alias in aliases:
                # do_patch_call closure will turn <nm> into stub_<nm>
                mykutils.for_each_call_to(do_patch_call,
                                          idc.get_name_ea_simple(alias))
    def _unpatchCalls(self, grafted_segs):
        """Reverse the effects of _patchCalls for stubs in ``grafted_segs``.

        For every function located inside one of the grafted segments, visit
        its call sites, remove the generated comments and cross-references,
        and restore the original call targets/bytes.

        Args:
            grafted_segs: Iterable of segment objects supporting ``in`` tests
                against function start addresses.
        """
        def do_unpatch_call(va_callsite):
            # `fva_stub` is read from the enclosing loop below.
            size = idc.get_item_size(va_callsite)
            ida_xref.del_cref(va_callsite, fva_stub, 0)
            cmt = idc.get_cmt(va_callsite, 0)
            newcmt = cmt
            # Remove automated comments
            if newcmt.startswith(g_patched_call_cmt):
                # Drop the marker line, then the preserved-disassembly line
                newcmt = newcmt[newcmt.find('\n') + 1:]
                if newcmt.find('\n') == -1:
                    newcmt = ''
                else:
                    newcmt = newcmt[newcmt.find('\n') + 1:]
                # Also drop the "(Import pointed ...)" line if present
                if newcmt.startswith(g_cmt_pointed):
                    if newcmt.find('\n') == -1:
                        newcmt = ''
                    else:
                        newcmt = newcmt[newcmt.find('\n') + 1:]

            if newcmt != cmt:
                idc.set_cmt(va_callsite, newcmt, 0)

            # Undo the patch in the manner appropriate for the operand type
            if idc.get_operand_type(va_callsite, 0) == ida_ua.o_mem:
                patch_import(va_callsite, idc.BADADDR)
            elif idc.get_operand_type(va_callsite, 0) == ida_ua.o_reg:
                va_imp = self._get_imp_for_register_call(va_callsite)
                if va_imp:
                    patch_pointer_width(va_imp, idc.BADADDR)
            else:
                revert_patch(va_callsite, size)

        for fva_stub in idautils.Functions():
            for seg in grafted_segs:
                if fva_stub in seg:
                    mykutils.for_each_call_to(do_unpatch_call, fva_stub)
def patch_pointer_width(va, value):
    """Write ``value`` at ``va`` using the database's native pointer width.

    Dispatches to the 32- or 64-bit patch routine registered in
    ``g_patch_pointer_width`` according to the idb's bitness.
    """
    writer = g_patch_pointer_width[mykutils.get_bitness()]
    writer(va, value)
def patch_import(va, target):
    """Patch the import corresponding to the call at @va to point to @target.

    Args:
        va (numbers.Integral): Address of call site for imported function
        target (str): Name or address of new call destination for import entry

    Returns:
        bool: True if successful
    """
    is_call = idc.print_insn_mnem(va) == 'call'
    if is_call:
        opno = 0  # Memory-indirect calls carry the import pointer in operand 0
    else:
        logger.warn('Not a call instruction at %s' % (phex(va)))
        return False

    # NOTE(review): `basestring` exists only on Python 2 -- confirm this
    # module is not expected to run under IDA's Python 3 interpreter.
    if isinstance(target, basestring):
        target = idc.get_name_ea_simple(target)

    # Overwrite the import table slot the call dereferences
    patch_pointer_width(idc.get_operand_value(va, opno), target)
    return True
def patch_call(va, new_nm):
    """Patch the call at @va to target @new_nm.

    Re-assembles the call instruction in place; the new encoding must fit
    within the original instruction's footprint (any slack is NOP-padded).

    Args:
        va (numbers.Integral): Address of the call site
        new_nm (str): Name of the new call destination

    Returns:
        bool: True if successful
    """
    is_call = idc.print_insn_mnem(va) == 'call'
    if is_call:
        opno = 0
        new_asm = 'call %s' % (new_nm)
    else:
        logger.warn('Not a call instruction at %s' % (phex(va)))
        return False

    # Already done?
    if idc.print_operand(va, opno) == new_nm:
        return True

    ok, code = idautils.Assemble(va, new_asm)
    if not ok:
        logger.warn('Failed assembling %s: %s' % (phex(va), new_asm))
        return False

    orig_opcode_len = idc.get_item_size(va)
    new_code_len = len(code)
    if orig_opcode_len < new_code_len:
        logger.warn('Not enough room or wrong opcode type to patch %s: %s' %
                    (phex(va), new_asm))
        return False

    # If we actually have too much room, then add filler
    # NOTE(review): '\x90' (NOP) concatenation assumes Assemble returned a
    # Python 2 str -- verify behavior under Python 3 where it may be bytes.
    if orig_opcode_len > new_code_len:
        delta = orig_opcode_len - new_code_len
        code += '\x90' * delta

    idaapi.patch_bytes(va, code)
    return True
def revert_patch(va, nr):
    """Unpatch the opcodes at @va, reverting them to their original value.

    Args:
        va (numbers.Integral): Address of the location of the patch to revert
        nr (numbers.Integral): Number of bytes to scan and revert

    Returns:
        bool: True if patched bytes were restored
    """
    reverted = False
    for offset in range(nr):
        addr = va + offset
        original = ida_bytes.get_original_byte(addr)
        # Only touch bytes that actually differ from the original image
        if idc.get_wide_byte(addr) != original:
            reverted = True
            idaapi.patch_byte(addr, original)
    return reverted
|
ga7g08/sympy | refs/heads/master | sympy/physics/matrices.py | 91 | """Known matrices related to physics"""
from __future__ import print_function, division
from sympy import Matrix, I, pi, sqrt
from sympy.functions import exp
from sympy.core.compatibility import range
def msigma(i):
    r"""Returns a Pauli matrix `\sigma_i` with `i=1,2,3`

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Pauli_matrices

    Examples
    ========

    >>> from sympy.physics.matrices import msigma
    >>> msigma(1)
    Matrix([
    [0, 1],
    [1, 0]])
    """
    # Table of the three Pauli matrices, keyed by index.
    pauli = {
        1: ((0, 1),
            (1, 0)),
        2: ((0, -I),
            (I, 0)),
        3: ((1, 0),
            (0, -1)),
    }
    if i not in pauli:
        raise IndexError("Invalid Pauli index")
    return Matrix(pauli[i])
def pat_matrix(m, dx, dy, dz):
    """Returns the Parallel Axis Theorem matrix to translate the inertia
    matrix a distance of `(dx, dy, dz)` for a body of mass m.

    Examples
    ========

    To translate a body having a mass of 2 units a distance of 1 unit along
    the `x`-axis we get:

    >>> from sympy.physics.matrices import pat_matrix
    >>> pat_matrix(2, 1, 0, 0)
    Matrix([
    [0, 0, 0],
    [0, 2, 0],
    [0, 0, 2]])

    """
    # Squared displacements and (negated) products of displacements.
    xx, yy, zz = dx**2, dy**2, dz**2
    xy = -dx*dy
    yz = -dy*dz
    zx = -dz*dx
    translation = ((yy + zz, xy, zx),
                   (xy, xx + zz, yz),
                   (zx, yz, yy + xx))
    return m*Matrix(translation)
def mgamma(mu, lower=False):
    r"""Returns a Dirac gamma matrix `\gamma^\mu` in the standard
    (Dirac) representation.

    If you want `\gamma_\mu`, use ``gamma(mu, True)``.

    We use a convention:

    `\gamma^5 = i \cdot \gamma^0 \cdot \gamma^1 \cdot \gamma^2 \cdot \gamma^3`
    `\gamma_5 = i \cdot \gamma_0 \cdot \gamma_1 \cdot \gamma_2 \cdot \gamma_3 = - \gamma^5`

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Gamma_matrices

    Examples
    ========

    >>> from sympy.physics.matrices import mgamma
    >>> mgamma(1)
    Matrix([
    [ 0, 0, 0, 1],
    [ 0, 0, 1, 0],
    [ 0, -1, 0, 0],
    [-1, 0, 0, 0]])
    """
    # Upper-index gamma matrices in the Dirac (standard) representation,
    # keyed by index; replaces the previous if/elif ladder.
    matrices = {
        0: ((1, 0, 0, 0),
            (0, 1, 0, 0),
            (0, 0, -1, 0),
            (0, 0, 0, -1)),
        1: ((0, 0, 0, 1),
            (0, 0, 1, 0),
            (0, -1, 0, 0),
            (-1, 0, 0, 0)),
        2: ((0, 0, 0, -I),
            (0, 0, I, 0),
            (0, I, 0, 0),
            (-I, 0, 0, 0)),
        3: ((0, 0, 1, 0),
            (0, 0, 0, -1),
            (-1, 0, 0, 0),
            (0, 1, 0, 0)),
        5: ((0, 0, 1, 0),
            (0, 0, 0, 1),
            (1, 0, 0, 0),
            (0, 1, 0, 0)),
    }
    if mu not in matrices:
        raise IndexError("Invalid Dirac index")
    m = Matrix(matrices[mu])
    if lower:
        # Lowering the index flips the sign for all but gamma^0 in the
        # (+,-,-,-) metric; gamma_5 follows the same convention here.
        if mu in (1, 2, 3, 5):
            m = -m
    return m
# Minkowski metric tensor using the (+,-,-,-) signature convention used in
# Quantum Field Theory.
minkowski_tensor = Matrix( (
    (1, 0, 0, 0),
    (0, -1, 0, 0),
    (0, 0, -1, 0),
    (0, 0, 0, -1)
))
def mdft(n):
    r"""
    Returns an expression of a discrete Fourier transform as a matrix multiplication.
    It is an n X n matrix.

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/DFT_matrix

    Examples
    ========

    >>> from sympy.physics.matrices import mdft
    >>> mdft(3)
    Matrix([
    [sqrt(3)/3, sqrt(3)/3, sqrt(3)/3],
    [sqrt(3)/3, sqrt(3)*exp(-2*I*pi/3)/3, sqrt(3)*exp(-4*I*pi/3)/3],
    [sqrt(3)/3, sqrt(3)*exp(-4*I*pi/3)/3, sqrt(3)*exp(-8*I*pi/3)/3]])
    """
    omega = exp(-2*pi*I/n)
    # Entry (j, k) of the DFT matrix is omega**(j*k); omega**0 evaluates to 1,
    # which covers the all-ones first row and first column.
    entries = [[omega**(j*k) for k in range(n)] for j in range(n)]
    return (1/sqrt(n))*Matrix(entries)
|
mitchrule/Miscellaneous | refs/heads/master | Django_Project/django/Lib/site-packages/django/db/backends/sqlite3/introspection.py | 42 | import re
from django.db.backends.base.introspection import (
BaseDatabaseIntrospection, FieldInfo, TableInfo,
)
# Matches "char(11)" / "varchar(11)" type names, capturing the size.
field_size_re = re.compile(r'^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$')


def get_field_size(name):
    """ Extract the size number from a "varchar(11)" type name """
    match = field_size_re.search(name)
    if match is None:
        return None
    return int(match.group(1))
# This light wrapper "fakes" a dictionary interface, because some SQLite data
# types include variables in them -- e.g. "varchar(30)" -- and can't be matched
# as a simple dictionary lookup.
class FlexibleFieldLookupDict(object):
    """Dictionary-like mapping from SQLite column types to Django fields.

    SQLite stores whatever type string the table was created with (for
    example "varchar(30)"), so lookups cannot always be a plain dictionary
    access; sized char types are handled dynamically.
    """
    # Maps SQL types to Django Field types. Some of the SQL types have multiple
    # entries here because SQLite allows for anything and doesn't normalize the
    # field type; it uses whatever was given.
    base_data_types_reverse = {
        'bool': 'BooleanField',
        'boolean': 'BooleanField',
        'smallint': 'SmallIntegerField',
        'smallint unsigned': 'PositiveSmallIntegerField',
        'smallinteger': 'SmallIntegerField',
        'int': 'IntegerField',
        'integer': 'IntegerField',
        'bigint': 'BigIntegerField',
        'integer unsigned': 'PositiveIntegerField',
        'decimal': 'DecimalField',
        'real': 'FloatField',
        'text': 'TextField',
        'char': 'CharField',
        'blob': 'BinaryField',
        'date': 'DateField',
        'datetime': 'DateTimeField',
        'time': 'TimeField',
    }

    def __getitem__(self, key):
        key = key.lower()
        mapped = self.base_data_types_reverse.get(key)
        if mapped is not None:
            return mapped
        # Not a plain type name; try to parse a sized (var)char type.
        size = get_field_size(key)
        if size is None:
            raise KeyError
        return ('CharField', {'max_length': size})
class DatabaseIntrospection(BaseDatabaseIntrospection):
    """SQLite implementation of database introspection.

    SQLite exposes little structured metadata: it stores the raw
    ``CREATE TABLE`` SQL in ``sqlite_master`` and offers PRAGMA commands for
    columns/indexes. Most methods below parse one of those two sources.
    """
    data_types_reverse = FlexibleFieldLookupDict()

    def get_table_list(self, cursor):
        """
        Returns a list of table and view names in the current database.
        """
        # Skip the sqlite_sequence system table used for autoincrement key
        # generation.
        cursor.execute("""
            SELECT name, type FROM sqlite_master
            WHERE type in ('table', 'view') AND NOT name='sqlite_sequence'
            ORDER BY name""")
        # row[1][0] is 't' for tables and 'v' for views.
        return [TableInfo(row[0], row[1][0]) for row in cursor.fetchall()]

    def get_table_description(self, cursor, table_name):
        "Returns a description of the table, with the DB-API cursor.description interface."
        return [FieldInfo(info['name'], info['type'], None, info['size'], None, None,
                          info['null_ok']) for info in self._table_info(cursor, table_name)]

    def column_name_converter(self, name):
        """
        SQLite will in some cases, e.g. when returning columns from views and
        subselects, return column names in 'alias."column"' format instead of
        simply 'column'.

        Affects SQLite < 3.7.15, fixed by http://www.sqlite.org/src/info/5526e0aa3c
        """
        # TODO: remove when SQLite < 3.7.15 is sufficiently old.
        # 3.7.13 ships in Debian stable as of 2014-03-21.
        if self.connection.Database.sqlite_version_info < (3, 7, 15):
            return name.split('.')[-1].strip('"')
        else:
            return name

    def get_relations(self, cursor, table_name):
        """
        Returns a dictionary of {field_name: (other_field_name, other_table)}
        representing all foreign-key relationships of the given table.
        """
        # Dictionary of relations to return
        relations = {}

        # Schema for this table
        cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
        try:
            results = cursor.fetchone()[0].strip()
        except TypeError:
            # It might be a view, then no results will be returned
            return relations
        # Keep only the column definitions between the outermost parentheses.
        results = results[results.index('(') + 1:results.rindex(')')]

        # Walk through and look for references to other tables. SQLite doesn't
        # really have enforced references, but since it echoes out the SQL used
        # to create the table we can look for REFERENCES statements used there.
        for field_desc in results.split(','):
            field_desc = field_desc.strip()
            if field_desc.startswith("UNIQUE"):
                continue

            m = re.search('references (\S*) ?\(["|]?(.*)["|]?\)', field_desc, re.I)
            if not m:
                continue
            table, column = [s.strip('"') for s in m.groups()]

            if field_desc.startswith("FOREIGN KEY"):
                # Find name of the target FK field
                m = re.match('FOREIGN KEY\(([^\)]*)\).*', field_desc, re.I)
                field_name = m.groups()[0].strip('"')
            else:
                field_name = field_desc.split()[0].strip('"')

            cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s", [table])
            result = cursor.fetchall()[0]
            other_table_results = result[0].strip()
            li, ri = other_table_results.index('('), other_table_results.rindex(')')
            other_table_results = other_table_results[li + 1:ri]

            # Confirm the referenced column actually exists on the other table
            for other_desc in other_table_results.split(','):
                other_desc = other_desc.strip()
                if other_desc.startswith('UNIQUE'):
                    continue

                other_name = other_desc.split(' ', 1)[0].strip('"')
                if other_name == column:
                    relations[field_name] = (other_name, table)
                    break

        return relations

    def get_key_columns(self, cursor, table_name):
        """
        Returns a list of (column_name, referenced_table_name, referenced_column_name) for all
        key columns in given table.
        """
        key_columns = []

        # Schema for this table
        cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
        results = cursor.fetchone()[0].strip()
        # Keep only the column definitions between the outermost parentheses.
        results = results[results.index('(') + 1:results.rindex(')')]

        # Walk through and look for references to other tables. SQLite doesn't
        # really have enforced references, but since it echoes out the SQL used
        # to create the table we can look for REFERENCES statements used there.
        for field_index, field_desc in enumerate(results.split(',')):
            field_desc = field_desc.strip()
            if field_desc.startswith("UNIQUE"):
                continue

            m = re.search('"(.*)".*references (.*) \(["|](.*)["|]\)', field_desc, re.I)
            if not m:
                continue

            # This will append (column_name, referenced_table_name, referenced_column_name) to key_columns
            key_columns.append(tuple(s.strip('"') for s in m.groups()))

        return key_columns

    def get_indexes(self, cursor, table_name):
        """
        Returns {column_name: {'primary_key': bool, 'unique': bool}} for all
        single-column indexes on the table (multi-column indexes are skipped).
        """
        indexes = {}
        for info in self._table_info(cursor, table_name):
            if info['pk'] != 0:
                indexes[info['name']] = {'primary_key': True,
                                         'unique': False}
        cursor.execute('PRAGMA index_list(%s)' % self.connection.ops.quote_name(table_name))
        # seq, name, unique
        for index, unique in [(field[1], field[2]) for field in cursor.fetchall()]:
            cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index))
            info = cursor.fetchall()
            # Skip indexes across multiple fields
            if len(info) != 1:
                continue
            name = info[0][2]  # seqno, cid, name
            indexes[name] = {'primary_key': indexes.get(name, {}).get("primary_key", False),
                             'unique': unique}
        return indexes

    def get_primary_key_column(self, cursor, table_name):
        """
        Get the column name of the primary key for the given table.
        """
        # Don't use PRAGMA because that causes issues with some transactions
        cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
        row = cursor.fetchone()
        if row is None:
            raise ValueError("Table %s does not exist" % table_name)
        results = row[0].strip()
        # Keep only the column definitions between the outermost parentheses.
        results = results[results.index('(') + 1:results.rindex(')')]
        for field_desc in results.split(','):
            field_desc = field_desc.strip()
            m = re.search('"(.*)".*PRIMARY KEY( AUTOINCREMENT)?$', field_desc)
            if m:
                return m.groups()[0]
        return None

    def _table_info(self, cursor, name):
        """Returns per-column info dicts from PRAGMA table_info."""
        cursor.execute('PRAGMA table_info(%s)' % self.connection.ops.quote_name(name))
        # cid, name, type, notnull, dflt_value, pk
        return [{'name': field[1],
                 'type': field[2],
                 'size': get_field_size(field[2]),
                 'null_ok': not field[3],
                 'pk': field[5]  # undocumented
                 } for field in cursor.fetchall()]

    def get_constraints(self, cursor, table_name):
        """
        Retrieves any constraints or keys (unique, pk, fk, check, index) across one or more columns.
        """
        constraints = {}
        # Get the index info
        cursor.execute("PRAGMA index_list(%s)" % self.connection.ops.quote_name(table_name))
        for row in cursor.fetchall():
            # Sqlite3 3.8.9+ has 5 columns, however older versions only give 3
            # columns. Discard last 2 columns if there.
            number, index, unique = row[:3]
            # Get the index info for that index
            cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index))
            for index_rank, column_rank, column in cursor.fetchall():
                if index not in constraints:
                    constraints[index] = {
                        "columns": [],
                        "primary_key": False,
                        "unique": bool(unique),
                        "foreign_key": False,
                        "check": False,
                        "index": True,
                    }
                constraints[index]['columns'].append(column)
        # Get the PK
        pk_column = self.get_primary_key_column(cursor, table_name)
        if pk_column:
            # SQLite doesn't actually give a name to the PK constraint,
            # so we invent one. This is fine, as the SQLite backend never
            # deletes PK constraints by name, as you can't delete constraints
            # in SQLite; we remake the table with a new PK instead.
            constraints["__primary__"] = {
                "columns": [pk_column],
                "primary_key": True,
                "unique": False,  # It's not actually a unique constraint.
                "foreign_key": False,
                "check": False,
                "index": False,
            }
        return constraints
|
fxia22/ASM_xf | refs/heads/master | PythonD/lib/python2.4/xml/dom/pulldom.py | 322 | import xml.sax
import xml.sax.handler
import types
# Python 2 string types; very old builds may lack UnicodeType.
try:
    _StringTypes = [types.StringType, types.UnicodeType]
except AttributeError:
    _StringTypes = [types.StringType]

# Event type constants produced by PullDOM and yielded by DOMEventStream.
START_ELEMENT = "START_ELEMENT"
END_ELEMENT = "END_ELEMENT"
COMMENT = "COMMENT"
START_DOCUMENT = "START_DOCUMENT"
END_DOCUMENT = "END_DOCUMENT"
PROCESSING_INSTRUCTION = "PROCESSING_INSTRUCTION"
IGNORABLE_WHITESPACE = "IGNORABLE_WHITESPACE"
CHARACTERS = "CHARACTERS"
class PullDOM(xml.sax.ContentHandler):
    """SAX ContentHandler that builds DOM nodes and records a stream of
    (event, node) pairs for DOMEventStream to consume.

    The event stream is a singly linked list of two-item cells
    ``[(event, node), next_cell]``: ``firstEvent`` is a dummy head and
    ``lastEvent`` always points at the final cell, so appending is O(1).
    """
    _locator = None
    document = None

    def __init__(self, documentFactory=None):
        from xml.dom import XML_NAMESPACE
        self.documentFactory = documentFactory
        # Dummy head cell of the linked event list.
        self.firstEvent = [None, None]
        self.lastEvent = self.firstEvent
        self.elementStack = []
        self.push = self.elementStack.append
        try:
            self.pop = self.elementStack.pop
        except AttributeError:
            # use class' pop instead
            pass
        self._ns_contexts = [{XML_NAMESPACE:'xml'}] # contains uri -> prefix dicts
        self._current_context = self._ns_contexts[-1]
        # Events (comments, PIs) seen before the document node exists.
        self.pending_events = []

    def pop(self):
        # Fallback used only if binding elementStack.pop in __init__ failed.
        result = self.elementStack[-1]
        del self.elementStack[-1]
        return result

    def setDocumentLocator(self, locator):
        self._locator = locator

    def startPrefixMapping(self, prefix, uri):
        if not hasattr(self, '_xmlns_attrs'):
            self._xmlns_attrs = []
        self._xmlns_attrs.append((prefix or 'xmlns', uri))
        self._ns_contexts.append(self._current_context.copy())
        self._current_context[uri] = prefix or None

    def endPrefixMapping(self, prefix):
        self._current_context = self._ns_contexts.pop()

    def startElementNS(self, name, tagName , attrs):
        # Retrieve xml namespace declaration attributes.
        xmlns_uri = 'http://www.w3.org/2000/xmlns/'
        xmlns_attrs = getattr(self, '_xmlns_attrs', None)
        if xmlns_attrs is not None:
            for aname, value in xmlns_attrs:
                attrs._attrs[(xmlns_uri, aname)] = value
            self._xmlns_attrs = []
        uri, localname = name
        if uri:
            # When using namespaces, the reader may or may not
            # provide us with the original name. If not, create
            # *a* valid tagName from the current context.
            if tagName is None:
                prefix = self._current_context[uri]
                if prefix:
                    tagName = prefix + ":" + localname
                else:
                    tagName = localname
            if self.document:
                node = self.document.createElementNS(uri, tagName)
            else:
                node = self.buildDocument(uri, tagName)
        else:
            # When the tagname is not prefixed, it just appears as
            # localname
            if self.document:
                node = self.document.createElement(localname)
            else:
                node = self.buildDocument(None, localname)

        for aname,value in attrs.items():
            a_uri, a_localname = aname
            if a_uri == xmlns_uri:
                # Namespace declaration attribute (xmlns / xmlns:prefix)
                if a_localname == 'xmlns':
                    qname = a_localname
                else:
                    qname = 'xmlns:' + a_localname
                attr = self.document.createAttributeNS(a_uri, qname)
                node.setAttributeNodeNS(attr)
            elif a_uri:
                prefix = self._current_context[a_uri]
                if prefix:
                    qname = prefix + ":" + a_localname
                else:
                    qname = a_localname
                attr = self.document.createAttributeNS(a_uri, qname)
                node.setAttributeNodeNS(attr)
            else:
                attr = self.document.createAttribute(a_localname)
                node.setAttributeNode(attr)
            attr.value = value

        self.lastEvent[1] = [(START_ELEMENT, node), None]
        self.lastEvent = self.lastEvent[1]
        self.push(node)

    def endElementNS(self, name, tagName):
        self.lastEvent[1] = [(END_ELEMENT, self.pop()), None]
        self.lastEvent = self.lastEvent[1]

    def startElement(self, name, attrs):
        # Non-namespace variant of startElementNS.
        if self.document:
            node = self.document.createElement(name)
        else:
            node = self.buildDocument(None, name)

        for aname,value in attrs.items():
            attr = self.document.createAttribute(aname)
            attr.value = value
            node.setAttributeNode(attr)

        self.lastEvent[1] = [(START_ELEMENT, node), None]
        self.lastEvent = self.lastEvent[1]
        self.push(node)

    def endElement(self, name):
        self.lastEvent[1] = [(END_ELEMENT, self.pop()), None]
        self.lastEvent = self.lastEvent[1]

    def comment(self, s):
        if self.document:
            node = self.document.createComment(s)
            self.lastEvent[1] = [(COMMENT, node), None]
            self.lastEvent = self.lastEvent[1]
        else:
            # No document yet; queue and convert in buildDocument().
            event = [(COMMENT, s), None]
            self.pending_events.append(event)

    def processingInstruction(self, target, data):
        if self.document:
            node = self.document.createProcessingInstruction(target, data)
            self.lastEvent[1] = [(PROCESSING_INSTRUCTION, node), None]
            self.lastEvent = self.lastEvent[1]
        else:
            # No document yet; queue and convert in buildDocument().
            event = [(PROCESSING_INSTRUCTION, target, data), None]
            self.pending_events.append(event)

    def ignorableWhitespace(self, chars):
        node = self.document.createTextNode(chars)
        self.lastEvent[1] = [(IGNORABLE_WHITESPACE, node), None]
        self.lastEvent = self.lastEvent[1]

    def characters(self, chars):
        node = self.document.createTextNode(chars)
        self.lastEvent[1] = [(CHARACTERS, node), None]
        self.lastEvent = self.lastEvent[1]

    def startDocument(self):
        if self.documentFactory is None:
            import xml.dom.minidom
            self.documentFactory = xml.dom.minidom.Document.implementation

    def buildDocument(self, uri, tagname):
        # Can't do that in startDocument, since we need the tagname
        # XXX: obtain DocumentType
        node = self.documentFactory.createDocument(uri, tagname, None)
        self.document = node
        self.lastEvent[1] = [(START_DOCUMENT, node), None]
        self.lastEvent = self.lastEvent[1]
        self.push(node)
        # Put everything we have seen so far into the document
        for e in self.pending_events:
            if e[0][0] == PROCESSING_INSTRUCTION:
                _,target,data = e[0]
                n = self.document.createProcessingInstruction(target, data)
                e[0] = (PROCESSING_INSTRUCTION, n)
            elif e[0][0] == COMMENT:
                n = self.document.createComment(e[0][1])
                e[0] = (COMMENT, n)
            else:
                raise AssertionError("Unknown pending event ",e[0][0])
            self.lastEvent[1] = e
            self.lastEvent = e
        self.pending_events = None
        return node.firstChild

    def endDocument(self):
        self.lastEvent[1] = [(END_DOCUMENT, self.document), None]
        self.pop()

    def clear(self):
        "clear(): Explicitly release parsing structures"
        self.document = None
class ErrorHandler:
    """Simple SAX error handler: print warnings, re-raise (fatal) errors."""
    def warning(self, exception):
        print exception
    def error(self, exception):
        raise exception
    def fatalError(self, exception):
        raise exception
class DOMEventStream:
    """Iterator of (event, node) pairs pulled from a SAX parser.

    When ``parser`` supports the IncrementalParser interface (has ``feed``),
    input is consumed from ``stream`` in ``bufsize`` chunks on demand;
    otherwise the whole stream is parsed up front (see _slurp).
    """
    def __init__(self, stream, parser, bufsize):
        self.stream = stream
        self.parser = parser
        self.bufsize = bufsize
        if not hasattr(self.parser, 'feed'):
            # Parser lacks incremental feeding; fall back to eager parsing.
            self.getEvent = self._slurp
        self.reset()

    def reset(self):
        self.pulldom = PullDOM()
        # This content handler relies on namespace support
        self.parser.setFeature(xml.sax.handler.feature_namespaces, 1)
        self.parser.setContentHandler(self.pulldom)

    def __getitem__(self, pos):
        # Old-style iteration protocol: pos is ignored, events are sequential.
        rc = self.getEvent()
        if rc:
            return rc
        raise IndexError

    def next(self):
        rc = self.getEvent()
        if rc:
            return rc
        raise StopIteration

    def __iter__(self):
        return self

    def expandNode(self, node):
        """Consume events through END_ELEMENT for ``node``, appending the
        intervening nodes as (grand)children so ``node`` is fully expanded."""
        event = self.getEvent()
        parents = [node]
        while event:
            token, cur_node = event
            if cur_node is node:
                return
            if token != END_ELEMENT:
                parents[-1].appendChild(cur_node)
            if token == START_ELEMENT:
                parents.append(cur_node)
            elif token == END_ELEMENT:
                del parents[-1]
            event = self.getEvent()

    def getEvent(self):
        """Return the next (event, node) pair, or None at end of input."""
        # use IncrementalParser interface, so we get the desired
        # pull effect
        if not self.pulldom.firstEvent[1]:
            self.pulldom.lastEvent = self.pulldom.firstEvent
        while not self.pulldom.firstEvent[1]:
            buf = self.stream.read(self.bufsize)
            if not buf:
                self.parser.close()
                return None
            self.parser.feed(buf)
        rc = self.pulldom.firstEvent[1][0]
        self.pulldom.firstEvent[1] = self.pulldom.firstEvent[1][1]
        return rc

    def _slurp(self):
        """ Fallback replacement for getEvent() using the
        standard SAX2 interface, which means we slurp the
        SAX events into memory (no performance gain, but
        we are compatible to all SAX parsers).
        """
        self.parser.parse(self.stream)
        self.getEvent = self._emit
        return self._emit()

    def _emit(self):
        """ Fallback replacement for getEvent() that emits
        the events that _slurp() read previously.
        """
        rc = self.pulldom.firstEvent[1][0]
        self.pulldom.firstEvent[1] = self.pulldom.firstEvent[1][1]
        return rc

    def clear(self):
        """clear(): Explicitly release parsing objects"""
        self.pulldom.clear()
        del self.pulldom
        self.parser = None
        self.stream = None
class SAX2DOM(PullDOM):
    """PullDOM subclass that also attaches each created node to its parent,
    so a fully connected DOM tree is built while events stream."""

    def startElementNS(self, name, tagName , attrs):
        PullDOM.startElementNS(self, name, tagName, attrs)
        curNode = self.elementStack[-1]
        parentNode = self.elementStack[-2]
        parentNode.appendChild(curNode)

    def startElement(self, name, attrs):
        PullDOM.startElement(self, name, attrs)
        curNode = self.elementStack[-1]
        parentNode = self.elementStack[-2]
        parentNode.appendChild(curNode)

    def processingInstruction(self, target, data):
        PullDOM.processingInstruction(self, target, data)
        # The node just recorded by PullDOM is in lastEvent[0][1].
        node = self.lastEvent[0][1]
        parentNode = self.elementStack[-1]
        parentNode.appendChild(node)

    def ignorableWhitespace(self, chars):
        PullDOM.ignorableWhitespace(self, chars)
        node = self.lastEvent[0][1]
        parentNode = self.elementStack[-1]
        parentNode.appendChild(node)

    def characters(self, chars):
        PullDOM.characters(self, chars)
        node = self.lastEvent[0][1]
        parentNode = self.elementStack[-1]
        parentNode.appendChild(node)
# Default chunk size used when pulling data from the stream (just under 16KB).
default_bufsize = (2 ** 14) - 20
def parse(stream_or_string, parser=None, bufsize=None):
    """Parse a file-like object or a filename into a DOMEventStream.

    A default SAX parser is created when none is supplied.
    """
    if bufsize is None:
        bufsize = default_bufsize
    if type(stream_or_string) in _StringTypes:
        stream = open(stream_or_string)
    else:
        stream = stream_or_string
    if not parser:
        parser = xml.sax.make_parser()
    return DOMEventStream(stream, parser, bufsize)
def parseString(string, parser=None):
    """Parse an XML document given as a string into a DOMEventStream."""
    try:
        from cStringIO import StringIO
    except ImportError:
        from StringIO import StringIO

    bufsize = len(string)
    buf = StringIO(string)
    if not parser:
        parser = xml.sax.make_parser()
    return DOMEventStream(buf, parser, bufsize)
|
vadimtk/chrome4sdp | refs/heads/master | tools/telemetry/telemetry/internal/backends/chrome/chrome_browser_backend.py | 6 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import pprint
import shlex
import sys
from telemetry.core import exceptions
from telemetry.core import util
from telemetry import decorators
from telemetry.internal.backends import browser_backend
from telemetry.internal.backends.chrome import extension_backend
from telemetry.internal.backends.chrome import system_info_backend
from telemetry.internal.backends.chrome import tab_list_backend
from telemetry.internal.backends.chrome_inspector import devtools_client_backend
from telemetry.internal.browser import user_agent
from telemetry.internal.browser import web_contents
from telemetry.internal import forwarders
from telemetry.testing import options_for_unittests
from telemetry.util import wpr_modes
class ChromeBrowserBackend(browser_backend.BrowserBackend):
"""An abstract class for chrome browser backends. Provides basic functionality
once a remote-debugger port has been established."""
# It is OK to have abstract methods. pylint: disable=W0223
  def __init__(self, platform_backend, supports_tab_control,
               supports_extensions, browser_options, output_profile_path,
               extensions_to_load):
    """Initializes state shared by Chrome-based browser backends.

    Args:
      platform_backend: Platform backend hosting the browser.
      supports_tab_control: Whether tabs can be controlled via DevTools.
      supports_extensions: Whether the browser supports extensions.
      browser_options: Browser options for this run.
      output_profile_path: Destination for the profile output, if any.
      extensions_to_load: Extensions to load at browser startup.
    """
    super(ChromeBrowserBackend, self).__init__(
        platform_backend=platform_backend,
        supports_extensions=supports_extensions,
        browser_options=browser_options,
        tab_list_backend=tab_list_backend.TabListBackend)
    self._port = None  # DevTools port; set later by the concrete backend.
    self._supports_tab_control = supports_tab_control
    self._devtools_client = None
    self._system_info_backend = None
    self._output_profile_path = output_profile_path
    self._extensions_to_load = extensions_to_load

    if browser_options.netsim:
      # Network simulation: replay serves on the well-known ports directly.
      self.wpr_port_pairs = forwarders.PortPairs(
          http=forwarders.PortPair(80, 80),
          https=forwarders.PortPair(443, 443),
          dns=forwarders.PortPair(53, 53))
    else:
      # Port 0: ports are chosen later (presumably by the forwarder/replay
      # setup -- confirm); DNS is not forwarded in this mode.
      self.wpr_port_pairs = forwarders.PortPairs(
          http=forwarders.PortPair(0, 0),
          https=forwarders.PortPair(0, 0),
          dns=None)

    if (self.browser_options.dont_override_profile and
        not options_for_unittests.AreSet()):
      sys.stderr.write('Warning: Not overriding profile. This can cause '
                       'unexpected effects due to profile-specific settings, '
                       'such as about:flags settings, cookies, and '
                       'extensions.\n')
  @property
  def devtools_client(self):
    """The DevTools client backend, or None before it is initialized."""
    return self._devtools_client
  @property
  @decorators.Cache
  def extension_backend(self):
    """Cached ExtensionBackendDict, or None if extensions are unsupported."""
    if not self.supports_extensions:
      return None
    return extension_backend.ExtensionBackendDict(self)
def _ArgsNeedProxyServer(self, args):
"""Returns True if args for Chrome indicate the need for proxy server."""
if '--enable-spdy-proxy-auth' in args:
return True
return [arg for arg in args if arg.startswith('--proxy-server=')]
  def GetBrowserStartupArgs(self):
    """Builds the list of command-line arguments used to launch Chrome."""
    args = []
    args.extend(self.browser_options.extra_browser_args)
    args.append('--enable-net-benchmarking')
    args.append('--metrics-recording-only')
    args.append('--no-default-browser-check')
    args.append('--no-first-run')

    # Turn on GPU benchmarking extension for all runs. The only side effect of
    # the extension being on is that render stats are tracked. This is believed
    # to be effectively free. And, by doing so here, it avoids us having to
    # programmatically inspect a pageset's actions in order to determine if it
    # might eventually scroll.
    args.append('--enable-gpu-benchmarking')

    # Set --no-proxy-server to work around some XP issues unless
    # some other flag indicates a proxy is needed.
    if not self._ArgsNeedProxyServer(args):
      self.browser_options.no_proxy_server = True

    if self.browser_options.disable_background_networking:
      args.append('--disable-background-networking')
    args.extend(self.GetReplayBrowserStartupArgs())
    args.extend(user_agent.GetChromeUserAgentArgumentFromType(
        self.browser_options.browser_user_agent_type))

    # Component and non-component extensions are loaded via separate flags.
    extensions = [extension.local_path
                  for extension in self._extensions_to_load
                  if not extension.is_component]
    extension_str = ','.join(extensions)
    if len(extensions) > 0:
      args.append('--load-extension=%s' % extension_str)

    component_extensions = [extension.local_path
                            for extension in self._extensions_to_load
                            if extension.is_component]
    component_extension_str = ','.join(component_extensions)
    if len(component_extensions) > 0:
      args.append('--load-component-extension=%s' % component_extension_str)

    if self.browser_options.no_proxy_server:
      args.append('--no-proxy-server')

    if self.browser_options.disable_component_extensions_with_background_pages:
      args.append('--disable-component-extensions-with-background-pages')

    # Disables the start page, as well as other external apps that can
    # steal focus or make measurements inconsistent.
    if self.browser_options.disable_default_apps:
      args.append('--disable-default-apps')

    if self.browser_options.enable_logging:
      args.append('--enable-logging')
      args.append('--v=1')
    return args
  def _UseHostResolverRules(self):
    """Returns True to add --host-resolver-rules to send requests to replay."""
    # NOTE(review): this method mixes self._platform_backend and
    # self.platform_backend -- presumably the latter is a property for the
    # former; confirm in the base class.
    if self._platform_backend.forwarder_factory.does_forwarder_override_dns:
      # Avoid --host-resolver-rules when the forwarder will map DNS requests
      # from the target platform to replay (on the host platform).
      # This allows the browser to exercise DNS requests.
      return False

    if self.browser_options.netsim and self.platform_backend.is_host_platform:
      # Avoid --host-resolver-rules when replay will configure the platform to
      # resolve hosts to replay.
      # This allows the browser to exercise DNS requests.
      return False

    return True
def GetReplayBrowserStartupArgs(self):
  """Returns browser args that route traffic through Web Page Replay."""
  if self.browser_options.wpr_mode == wpr_modes.WPR_OFF:
    # Replay is off: nothing to add.
    return []
  flags = []
  if self.should_ignore_certificate_errors:
    # Ignore certificate errors if the platform backend has not created
    # and installed a root certificate.
    flags.append('--ignore-certificate-errors')
  if self._UseHostResolverRules():
    # Force hostnames to resolve to the replay's host_ip.
    flags.append('--host-resolver-rules=MAP * %s,EXCLUDE localhost' %
                 self._platform_backend.forwarder_factory.host_ip)
  # Force the browser to send HTTP/HTTPS requests to fixed ports if they
  # are not the standard HTTP/HTTPS ports.
  port_flags = (
      ('--testing-fixed-http-port=%s',
       self.platform_backend.wpr_http_device_port, 80),
      ('--testing-fixed-https-port=%s',
       self.platform_backend.wpr_https_device_port, 443),
  )
  for template, device_port, standard_port in port_flags:
    if device_port != standard_port:
      flags.append(template % device_port)
  return flags
def HasBrowserFinishedLaunching(self):
  """Polls whether the browser's DevTools agent is accepting connections.

  Requires self._port to have been determined before this is called.
  """
  assert self._port, 'No DevTools port info available.'
  return devtools_client_backend.IsDevToolsAgentAvailable(self._port)
def _InitDevtoolsClientBackend(self, remote_devtools_port=None):
  """Initializes the DevTools client backend used to talk to the browser.

  May only be called once per launch; the existing client must be torn
  down (see Close) before a new one is created.

  Args:
    remote_devtools_port: The remote devtools port, if
        any. Otherwise assumed to be the same as self._port.
  """
  assert not self._devtools_client, (
      'Devtool client backend cannot be init twice')
  self._devtools_client = devtools_client_backend.DevToolsClientBackend(
      self._port, remote_devtools_port or self._port, self)
def _WaitForBrowserToComeUp(self):
  """Waits until the browser's DevTools agent becomes reachable.

  Raises:
    exceptions.BrowserGoneException: if the browser process died while
        waiting.
    exceptions.BrowserConnectionGoneException: if the process is still
        running but DevTools did not come up within the startup timeout.
  """
  try:
    timeout = self.browser_options.browser_startup_timeout
    util.WaitFor(self.HasBrowserFinishedLaunching, timeout=timeout)
  except (exceptions.TimeoutException, exceptions.ProcessGoneException) as e:
    # Distinguish "browser crashed" from "browser alive but unreachable".
    if not self.IsBrowserRunning():
      raise exceptions.BrowserGoneException(self.browser, e)
    raise exceptions.BrowserConnectionGoneException(self.browser, e)
def _WaitForExtensionsToLoad(self):
  """ Wait for all extensions to load.

  Be sure to check whether the browser_backend supports_extensions before
  calling this method.

  Raises:
    exceptions.TimeoutException: if the extensions are still not loaded
        after 60 seconds; the pending extension ids and the extension
        backend state are logged first to aid debugging.
  """
  assert self._supports_extensions
  assert self._devtools_client, (
      'Waiting for extensions required devtool client to be initiated first')
  try:
    util.WaitFor(self._AllExtensionsLoaded, timeout=60)
  except exceptions.TimeoutException:
    # Log which extensions never became ready before re-raising.
    logging.error('ExtensionsToLoad: ' +
                  repr([e.extension_id for e in self._extensions_to_load]))
    logging.error('Extension list: ' +
                  pprint.pformat(self.extension_backend, indent=4))
    raise
def _AllExtensionsLoaded(self):
  """Returns True once every extension in _extensions_to_load is ready.

  An extension counts as ready when each of its inspectable pages reports
  the extension's own URL and a complete/interactive document readyState.
  """
  # Extension pages are loaded from an about:blank page,
  # so we need to check that the document URL is the extension
  # page in addition to the ready state.
  extension_ready_js = """
      document.URL.lastIndexOf('chrome-extension://%s/', 0) == 0 &&
      (document.readyState == 'complete' ||
       document.readyState == 'interactive')
  """
  for e in self._extensions_to_load:
    try:
      extension_objects = self.extension_backend[e.extension_id]
    except KeyError:
      # The extension has not shown up in the backend yet.
      return False
    for extension_object in extension_objects:
      try:
        res = extension_object.EvaluateJavaScript(
            extension_ready_js % e.extension_id)
      except exceptions.EvaluateException:
        # If the inspected page is not ready, we will get an error
        # when we evaluate a JS expression, but we can just keep polling
        # until the page is ready (crbug.com/251913).
        res = None

      # TODO(tengs): We don't have full support for getting the Chrome
      # version before launch, so for now we use a generic workaround to
      # check for an extension binding bug in old versions of Chrome.
      # See crbug.com/263162 for details.
      if res and extension_object.EvaluateJavaScript(
          'chrome.runtime == null'):
        extension_object.Reload()
      if not res:
        return False
  return True
@property
def browser_directory(self):
  """Directory containing the browser binary (subclass responsibility)."""
  raise NotImplementedError()

@property
def profile_directory(self):
  """Directory holding the browser profile (subclass responsibility)."""
  raise NotImplementedError()
@property
def supports_tab_control(self):
  """Whether tabs can be controlled; value is supplied by the subclass."""
  return self._supports_tab_control

@property
def supports_tracing(self):
  """Always True for this backend."""
  return True
def StartTracing(self, trace_options, custom_categories=None,
                 timeout=web_contents.DEFAULT_WEB_CONTENTS_TIMEOUT):
  """Starts Chrome tracing through the DevTools client.

  Args:
    trace_options: A tracing_options.TracingOptions instance.
    custom_categories: An optional string containing a list of
        comma separated categories that will be traced
        instead of the default category set. Example: use
        "webkit,cc,disabled-by-default-cc.debug" to trace only
        those three event categories.
    timeout: Seconds to wait for the DevTools request to complete.
  """
  return self.devtools_client.StartChromeTracing(
      trace_options, custom_categories, timeout)
def StopTracing(self, trace_data_builder):
  """Stops Chrome tracing, collecting results into trace_data_builder."""
  self.devtools_client.StopChromeTracing(trace_data_builder)
def GetProcessName(self, cmd_line):
  """Returns a user-friendly name for the process of the given |cmd_line|."""
  if not cmd_line:
    # TODO(tonyg): Eventually we should make all of these known and add an
    # assertion.
    return 'unknown'
  # Well-known helper processes are recognized by fixed markers in the
  # command line, checked in the original priority order.
  known_markers = (
      ('nacl_helper_bootstrap', 'nacl_helper_bootstrap'),
      (':sandboxed_process', 'renderer'),
      (':privileged_process', 'gpu-process'),
  )
  for marker, process_name in known_markers:
    if marker in cmd_line:
      return process_name
  # Otherwise a --type= switch names the process; its absence means this is
  # the main browser process.
  type_values = [token.split('=')[1]
                 for token in shlex.split(cmd_line)
                 if token.startswith('--type=')]
  return type_values[0] if type_values else 'browser'
def Close(self):
  """Tears down the DevTools client connection, if one exists."""
  client = self._devtools_client
  if client:
    client.Close()
    # Drop the reference so a later _InitDevtoolsClientBackend is allowed.
    self._devtools_client = None
@property
def supports_system_info(self):
  """Whether GetSystemInfo() returns a value for this browser."""
  # PEP 8: comparisons to singletons like None must use `is` / `is not`,
  # never equality operators (`!=` can be overridden by the result type).
  return self.GetSystemInfo() is not None
def GetSystemInfo(self):
  """Returns system info queried via a SystemInfoBackend.

  The backend is created lazily (constructed with self._port) and cached
  on the instance for subsequent calls.
  """
  if self._system_info_backend is None:
    self._system_info_backend = system_info_backend.SystemInfoBackend(
        self._port)
  return self._system_info_backend.GetSystemInfo()
@property
def supports_memory_dumping(self):
  """Always True for this backend."""
  return True

def DumpMemory(self, timeout=web_contents.DEFAULT_WEB_CONTENTS_TIMEOUT):
  """Requests a memory dump via the DevTools client."""
  return self.devtools_client.DumpMemory(timeout)

@property
def supports_cpu_metrics(self):
  """Always True for this backend."""
  return True

@property
def supports_memory_metrics(self):
  """Always True for this backend."""
  return True

@property
def supports_power_metrics(self):
  """Always True for this backend."""
  return True
|
ProfessorX/Config | refs/heads/master | .PyCharm30/system/python_stubs/-1247972723/PyQt4/QtGui/QSyntaxHighlighter.py | 2 | # encoding: utf-8
# module PyQt4.QtGui
# from /usr/lib/python2.7/dist-packages/PyQt4/QtGui.so
# by generator 1.135
# no doc
# imports
import PyQt4.QtCore as __PyQt4_QtCore
class QSyntaxHighlighter(__PyQt4_QtCore.QObject):
    """
    QSyntaxHighlighter(QTextEdit)
    QSyntaxHighlighter(QTextDocument)
    QSyntaxHighlighter(QObject)
    """
    # NOTE(review): this is an IDE-generated API skeleton. The bodies below
    # are placeholders emitted by the stub generator (several "return" the
    # result *type* named in the docstring rather than an instance) and are
    # never executed; the real implementation lives in the compiled
    # PyQt4.QtGui extension module.

    def currentBlock(self): # real signature unknown; restored from __doc__
        """ QSyntaxHighlighter.currentBlock() -> QTextBlock """
        return QTextBlock

    def currentBlockState(self): # real signature unknown; restored from __doc__
        """ QSyntaxHighlighter.currentBlockState() -> int """
        return 0

    def currentBlockUserData(self): # real signature unknown; restored from __doc__
        """ QSyntaxHighlighter.currentBlockUserData() -> QTextBlockUserData """
        return QTextBlockUserData

    def document(self): # real signature unknown; restored from __doc__
        """ QSyntaxHighlighter.document() -> QTextDocument """
        return QTextDocument

    def format(self, p_int): # real signature unknown; restored from __doc__
        """ QSyntaxHighlighter.format(int) -> QTextCharFormat """
        return QTextCharFormat

    def highlightBlock(self, QString): # real signature unknown; restored from __doc__
        """ QSyntaxHighlighter.highlightBlock(QString) """
        pass

    def previousBlockState(self): # real signature unknown; restored from __doc__
        """ QSyntaxHighlighter.previousBlockState() -> int """
        return 0

    def rehighlight(self): # real signature unknown; restored from __doc__
        """ QSyntaxHighlighter.rehighlight() """
        pass

    def rehighlightBlock(self, QTextBlock): # real signature unknown; restored from __doc__
        """ QSyntaxHighlighter.rehighlightBlock(QTextBlock) """
        pass

    def setCurrentBlockState(self, p_int): # real signature unknown; restored from __doc__
        """ QSyntaxHighlighter.setCurrentBlockState(int) """
        pass

    def setCurrentBlockUserData(self, QTextBlockUserData): # real signature unknown; restored from __doc__
        """ QSyntaxHighlighter.setCurrentBlockUserData(QTextBlockUserData) """
        pass

    def setDocument(self, QTextDocument): # real signature unknown; restored from __doc__
        """ QSyntaxHighlighter.setDocument(QTextDocument) """
        pass

    def setFormat(self, p_int, p_int_1, *__args): # real signature unknown; restored from __doc__ with multiple overloads
        """
        QSyntaxHighlighter.setFormat(int, int, QTextCharFormat)
        QSyntaxHighlighter.setFormat(int, int, QColor)
        QSyntaxHighlighter.setFormat(int, int, QFont)
        """
        pass

    def __init__(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
        pass
|
openmv/openmv | refs/heads/master | scripts/examples/OpenMV/14-WiFi-Shield/ntp.py | 3 | # NTP Example
#
# This example shows how to get the current time using NTP with the WiFi shield.
import network, usocket, ustruct, utime

SSID='' # Network SSID
KEY='' # Network key

# Offset subtracted from the NTP transmit timestamp before passing it to
# utime.localtime(): 2208988800 converts the NTP era (1900-01-01) to the
# Unix epoch (1970-01-01), and 946684800 further shifts to 2000-01-01 --
# presumably the epoch used by this port's utime; verify for your firmware.
TIMESTAMP = 2208988800+946684800

# Init wlan module and connect to network
print("Trying to connect... (may take a while)...")

wlan = network.WINC()
wlan.connect(SSID, key=KEY, security=wlan.WPA_PSK)

# We should have a valid IP now via DHCP
print(wlan.ifconfig())

# Create new socket
client = usocket.socket(usocket.AF_INET, usocket.SOCK_DGRAM)

# Get addr info via DNS
addr = usocket.getaddrinfo("pool.ntp.org", 123)[0][4]

# Send query: a 48-byte NTP packet whose first byte 0x1b encodes
# LI=0, version=3, mode=3 (client request).
client.sendto('\x1b' + 47 * '\0', addr)
data, address = client.recvfrom(1024)

# Print time: word 10 of the twelve big-endian 32-bit words is the
# transmit timestamp (integer seconds).
t = ustruct.unpack(">IIIIIIIIIIII", data)[10] - TIMESTAMP
print ("Year:%d Month:%d Day:%d Time: %d:%d:%d" % (utime.localtime(t)[0:6]))
|
Javex/mixminion | refs/heads/master | lib/mixminion/__init__.py | 6 | # Copyright 2002-2011 Nick Mathewson. See LICENSE for licensing information.
"""mixminion
Client and shared code for type III anonymous remailers.
"""
# This version string is generated from setup.py; don't edit it.
__version__ = "0.0.8alpha3"
# This 5-tuple encodes the version number for comparison. Don't edit it.
# The first 3 numbers are the version number; the 4th is:
# 0 for alpha
# 50 for beta
# 99 for release candidate
# 100 for release.
# The 5th is a patchlevel. If -1, it is suppressed.
# The 4th or 5th number may be a string. If so, it is not meant to
# succeed or precede any other sub-version with the same a.b.c version
# number.
version_info = (0, 0, 8, 0, 3)
__all__ = [ 'server', 'directory' ]
def version_tuple_to_string(t):
    """Format a 5-tuple version (see version_info) as a display string.

    The 4th element selects a status suffix ("alpha", "beta", ...); the
    5th is a patchlevel which is omitted when it equals -1.
    """
    assert len(t) == 5
    major, minor, sub, status, patch = t
    # Map the well-known integer status codes to their printable suffixes.
    suffixes = {0: "alpha", 50: "beta", 98: "pre", 99: "rc", 100: ""}
    if status in suffixes:
        status_str = suffixes[status]
    elif type(status) == type(1):
        # Unknown numeric status codes are shown parenthesized.
        status_str = "(%s)" % status
    else:
        # Non-numeric statuses pass through verbatim.
        status_str = status
    if patch == -1:
        patch_str = ""
    elif not status_str:
        # No status suffix: separate the patchlevel with a dot.
        patch_str = ".%s" % patch
    else:
        patch_str = str(patch)
    return "%s.%s.%s%s%s" % (major, minor, sub, status_str, patch_str)
def parse_version_string(s):
    """Parse a version string like "0.0.8alpha3" into a 5-tuple.

    Returns (major, minor, sub, status, patch) in the same format as
    version_info.  Raises ValueError if the string is not a version.
    """
    import re
    m = re.match(r'(\d+)\.(\d+)\.(\d+)(?:([^\d\(]+|\(\d+\))(\d+)?)?', s)
    if not m:
        raise ValueError
    major, minor, sub, status, patch = m.groups()
    named_status = {'rc': 99, 'pre': 98, 'beta': 50, 'alpha': 0}
    if not status or status in ('.', 'p'):
        # A missing status or a bare release marker means "full release".
        status = 100
    elif status in named_status:
        status = named_status[status]
    elif status.startswith('(') and status.endswith(')'):
        # Parenthesized numeric status, e.g. "(7)".
        try:
            status = int(status[1:-1])
        except ValueError:
            pass
    # Any other status string is kept verbatim.
    if not patch:
        patch = -1
    else:
        try:
            patch = int(patch)
        except ValueError:
            pass
    return (int(major), int(minor), int(sub), status, patch)
def cmp_versions(a,b):
    """Compare two 5-tuple versions, Python 2 cmp() convention (-1/0/1).

    Status fields (index 3) are ordered only when both are integers or
    equal; an int/string mismatch raises ValueError.
    NOTE: relies on the Python 2-only builtin cmp() and raise syntax.
    """
    r = cmp(a[0],b[0])
    if r: return r
    r = cmp(a[1],b[1])
    if r: return r
    r = cmp(a[2],b[2])
    if r: return r
    # Only integer statuses have a defined ordering.
    if type(a[3]) == type(b[3]) == type(1):
        r = cmp(a[3],b[3])
        if r: return r
    elif a[3] != b[3]:
        raise ValueError, "Can't compare versions"
    # Statuses tie (or are equal strings): fall back to the patchlevel.
    return cmp(a[4],b[4])
# Sanity checks: the string and tuple forms of the version must round-trip,
# and a version must compare equal to itself.
assert __version__ == version_tuple_to_string(version_info)
assert parse_version_string(__version__) == version_info
assert cmp_versions(version_info, version_info) == 0

## This next segment keeps pychecker from making spurious complaints.
import sys
if sys.modules.has_key("pychecker"):  # dict.has_key: Python 2 only
    import mixminion.ClientMain
    import mixminion.server
    import mixminion.test
    import mixminion.testSupport
del sys
|
msebire/intellij-community | refs/heads/master | python/lib/Lib/codecs.py | 116 | """ codecs -- Python Codec Registry, API and helpers.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
import __builtin__, sys
### Registry and builtin stateless codec functions
try:
from _codecs import *
except ImportError, why:
raise SystemError('Failed to load the builtin codecs: %s' % why)
__all__ = ["register", "lookup", "open", "EncodedFile", "BOM", "BOM_BE",
"BOM_LE", "BOM32_BE", "BOM32_LE", "BOM64_BE", "BOM64_LE",
"BOM_UTF8", "BOM_UTF16", "BOM_UTF16_LE", "BOM_UTF16_BE",
"BOM_UTF32", "BOM_UTF32_LE", "BOM_UTF32_BE",
"strict_errors", "ignore_errors", "replace_errors",
"xmlcharrefreplace_errors",
"register_error", "lookup_error"]
### Constants
#
# Byte Order Mark (BOM = ZERO WIDTH NO-BREAK SPACE = U+FEFF)
# and its possible byte string values
# for UTF8/UTF16/UTF32 output and little/big endian machines
#
# UTF-8
BOM_UTF8 = '\xef\xbb\xbf'
# UTF-16, little endian
BOM_LE = BOM_UTF16_LE = '\xff\xfe'
# UTF-16, big endian
BOM_BE = BOM_UTF16_BE = '\xfe\xff'
# UTF-32, little endian
BOM_UTF32_LE = '\xff\xfe\x00\x00'
# UTF-32, big endian
BOM_UTF32_BE = '\x00\x00\xfe\xff'
if sys.byteorder == 'little':
# UTF-16, native endianness
BOM = BOM_UTF16 = BOM_UTF16_LE
# UTF-32, native endianness
BOM_UTF32 = BOM_UTF32_LE
else:
# UTF-16, native endianness
BOM = BOM_UTF16 = BOM_UTF16_BE
# UTF-32, native endianness
BOM_UTF32 = BOM_UTF32_BE
# Old broken names (don't use in new code)
BOM32_LE = BOM_UTF16_LE
BOM32_BE = BOM_UTF16_BE
BOM64_LE = BOM_UTF32_LE
BOM64_BE = BOM_UTF32_BE
### Codec base classes (defining the API)
class CodecInfo(tuple):
    """Codec details returned by codecs.lookup().

    For backward compatibility the object is also a 4-tuple of
    (encode, decode, streamreader, streamwriter); the richer API is
    exposed through the attributes set in __new__.
    """

    def __new__(cls, encode, decode, streamreader=None, streamwriter=None,
                incrementalencoder=None, incrementaldecoder=None, name=None):
        obj = tuple.__new__(cls, (encode, decode, streamreader, streamwriter))
        # Mirror everything as attributes so new-style access works too.
        obj.name = name
        obj.encode = encode
        obj.decode = decode
        obj.incrementalencoder = incrementalencoder
        obj.incrementaldecoder = incrementaldecoder
        obj.streamwriter = streamwriter
        obj.streamreader = streamreader
        return obj

    def __repr__(self):
        return "<%s.%s object for encoding %s at 0x%x>" % (
            self.__class__.__module__, self.__class__.__name__,
            self.name, id(self))
class Codec:

    """ Defines the interface for stateless encoders/decoders.

        The .encode()/.decode() methods may use different error
        handling schemes by providing the errors argument. These
        string values are predefined:

         'strict' - raise a ValueError error (or a subclass)
         'ignore' - ignore the character and continue with the next
         'replace' - replace with a suitable replacement character;
                    Python will use the official U+FFFD REPLACEMENT
                    CHARACTER for the builtin Unicode codecs on
                    decoding and '?' on encoding.
         'xmlcharrefreplace' - Replace with the appropriate XML
                               character reference (only for encoding).
         'backslashreplace'  - Replace with backslashed escape sequences
                               (only for encoding).

        The set of allowed values can be extended via register_error.
    """
    # Abstract interface: subclasses (typically generated per-encoding)
    # must override both methods below.

    def encode(self, input, errors='strict'):
        """ Encodes the object input and returns a tuple (output
            object, length consumed).

            errors defines the error handling to apply. It defaults to
            'strict' handling.

            The method may not store state in the Codec instance. Use
            StreamCodec for codecs which have to keep state in order to
            make encoding/decoding efficient.

            The encoder must be able to handle zero length input and
            return an empty object of the output object type in this
            situation.
        """
        raise NotImplementedError

    def decode(self, input, errors='strict'):
        """ Decodes the object input and returns a tuple (output
            object, length consumed).

            input must be an object which provides the bf_getreadbuf
            buffer slot. Python strings, buffer objects and memory
            mapped files are examples of objects providing this slot.

            errors defines the error handling to apply. It defaults to
            'strict' handling.

            The method may not store state in the Codec instance. Use
            StreamCodec for codecs which have to keep state in order to
            make encoding/decoding efficient.

            The decoder must be able to handle zero length input and
            return an empty object of the output object type in this
            situation.
        """
        raise NotImplementedError
class IncrementalEncoder(object):
    """
    An IncrementalEncoder encodes an input in multiple steps. The input can be
    passed piece by piece to the encode() method. The IncrementalEncoder
    remembers the state of the encoding process between calls to encode().
    """
    def __init__(self, errors='strict'):
        """
        Creates an IncrementalEncoder instance.

        The IncrementalEncoder may use different error handling schemes by
        providing the errors keyword argument. See the module docstring
        for a list of possible values.
        """
        self.errors = errors
        # Not used by this base class itself; subclasses such as
        # BufferedIncrementalEncoder re-initialize and use it.
        self.buffer = ""

    def encode(self, input, final=False):
        """
        Encodes input and returns the resulting object.
        """
        raise NotImplementedError

    def reset(self):
        """
        Resets the encoder to the initial state.
        """
class BufferedIncrementalEncoder(IncrementalEncoder):
    """
    This subclass of IncrementalEncoder can be used as the baseclass for an
    incremental encoder if the encoder must keep some of the output in a
    buffer between calls to encode().
    """

    def __init__(self, errors='strict'):
        IncrementalEncoder.__init__(self, errors)
        # Unencoded input carried over between calls to encode().
        self.buffer = ""

    def _buffer_encode(self, input, errors, final):
        # Subclasses must override: encode input and return an
        # (output, length consumed) tuple.
        raise NotImplementedError

    def encode(self, input, final=False):
        """Encode input, retrying any input left over from the last call."""
        pending = self.buffer + input
        output, consumed = self._buffer_encode(pending, self.errors, final)
        # Whatever was not consumed is kept for the next call.
        self.buffer = pending[consumed:]
        return output

    def reset(self):
        IncrementalEncoder.reset(self)
        self.buffer = ""
class IncrementalDecoder(object):
    """
    An IncrementalDecoder decodes an input in multiple steps. The input can be
    passed piece by piece to the decode() method. The IncrementalDecoder
    remembers the state of the decoding process between calls to decode().
    """
    def __init__(self, errors='strict'):
        """
        Creates a IncrementalDecoder instance.

        The IncrementalDecoder may use different error handling schemes by
        providing the errors keyword argument. See the module docstring
        for a list of possible values.
        """
        self.errors = errors

    def decode(self, input, final=False):
        """
        Decodes input and returns the resulting object.
        """
        raise NotImplementedError

    def reset(self):
        """
        Resets the decoder to the initial state.
        """
class BufferedIncrementalDecoder(IncrementalDecoder):
    """
    This subclass of IncrementalDecoder can be used as the baseclass for an
    incremental decoder if the decoder must be able to handle incomplete
    byte sequences.
    """

    def __init__(self, errors='strict'):
        IncrementalDecoder.__init__(self, errors)
        # Undecoded input carried over between calls to decode().
        self.buffer = ""

    def _buffer_decode(self, input, errors, final):
        # Subclasses must override: decode input and return an
        # (output, length consumed) tuple.
        raise NotImplementedError

    def decode(self, input, final=False):
        """Decode input, retrying any bytes left over from the last call."""
        pending = self.buffer + input
        output, consumed = self._buffer_decode(pending, self.errors, final)
        # Whatever could not be decoded yet is kept for the next call.
        self.buffer = pending[consumed:]
        return output

    def reset(self):
        IncrementalDecoder.reset(self)
        self.buffer = ""
#
# The StreamWriter and StreamReader class provide generic working
# interfaces which can be used to implement new encoding submodules
# very easily. See encodings/utf_8.py for an example on how this is
# done.
#
class StreamWriter(Codec):

    def __init__(self, stream, errors='strict'):
        """ Creates a StreamWriter instance.

            stream must be a file-like object open for writing
            (binary) data.

            The StreamWriter may use different error handling
            schemes by providing the errors keyword argument. These
            parameters are predefined:

             'strict' - raise a ValueError (or a subclass)
             'ignore' - ignore the character and continue with the next
             'replace'- replace with a suitable replacement character
             'xmlcharrefreplace' - Replace with the appropriate XML
                                   character reference.
             'backslashreplace'  - Replace with backslashed escape
                                   sequences (only for encoding).

            The set of allowed parameter values can be extended via
            register_error.
        """
        self.stream = stream
        self.errors = errors

    def write(self, object):
        """ Writes the object's contents encoded to self.stream.
        """
        data, consumed = self.encode(object, self.errors)
        self.stream.write(data)

    def writelines(self, list):
        """ Writes the concatenated list of strings to the stream
            using .write().
        """
        self.write(''.join(list))

    def reset(self):
        """ Flushes and resets the codec buffers used for keeping state.

            Calling this method should ensure that the data on the
            output is put into a clean state, that allows appending
            of new fresh data without having to rescan the whole
            stream to recover state.
        """
        # Stateless writers have nothing to flush; stateful subclasses
        # override this.
        pass

    def __getattr__(self, name,
                    getattr=getattr):
        """ Inherit all other methods from the underlying stream.
        """
        # NOTE(review): binding getattr as a default argument looks like the
        # stdlib's usual micro-optimization / shutdown-safety idiom; callers
        # never pass it explicitly.
        return getattr(self.stream, name)

    def __enter__(self):
        return self

    def __exit__(self, type, value, tb):
        # Closing the underlying stream is what ends the context.
        self.stream.close()
###
class StreamReader(Codec):

    def __init__(self, stream, errors='strict'):
        """ Creates a StreamReader instance.

            stream must be a file-like object open for reading
            (binary) data.

            The StreamReader may use different error handling
            schemes by providing the errors keyword argument. These
            parameters are predefined:

             'strict' - raise a ValueError (or a subclass)
             'ignore' - ignore the character and continue with the next
             'replace'- replace with a suitable replacement character;

            The set of allowed parameter values can be extended via
            register_error.
        """
        self.stream = stream
        self.errors = errors
        # Undecoded bytes read from the stream but not yet consumed.
        self.bytebuffer = ""
        # For str->str decoding this will stay a str
        # For str->unicode decoding the first read will promote it to unicode
        self.charbuffer = ""
        # Decoded lines cached by readline(); mutually exclusive with
        # charbuffer holding data (read() merges it back first).
        self.linebuffer = None

    def decode(self, input, errors='strict'):
        # Abstract: per-encoding subclasses provide the actual decoder.
        raise NotImplementedError

    def read(self, size=-1, chars=-1, firstline=False):
        """ Decodes data from the stream self.stream and returns the
            resulting object.

            chars indicates the number of characters to read from the
            stream. read() will never return more than chars
            characters, but it might return less, if there are not enough
            characters available.

            size indicates the approximate maximum number of bytes to
            read from the stream for decoding purposes. The decoder
            can modify this setting as appropriate. The default value
            -1 indicates to read and decode as much as possible. size
            is intended to prevent having to decode huge files in one
            step.

            If firstline is true, and a UnicodeDecodeError happens
            after the first line terminator in the input only the first line
            will be returned, the rest of the input will be kept until the
            next call to read().

            The method should use a greedy read strategy meaning that
            it should read as much data as is allowed within the
            definition of the encoding and the given size, e.g. if
            optional encoding endings or state markers are available
            on the stream, these should be read too.
        """
        # If we have lines cached, first merge them back into characters
        if self.linebuffer:
            self.charbuffer = "".join(self.linebuffer)
            self.linebuffer = None

        # read until we get the required number of characters (if available)
        while True:
            # can the request can be satisfied from the character buffer?
            if chars < 0:
                if size < 0:
                    if self.charbuffer:
                        break
                elif len(self.charbuffer) >= size:
                    break
            else:
                if len(self.charbuffer) >= chars:
                    break
            # we need more data
            if size < 0:
                newdata = self.stream.read()
            else:
                newdata = self.stream.read(size)
            # decode bytes (those remaining from the last call included)
            data = self.bytebuffer + newdata
            try:
                newchars, decodedbytes = self.decode(data, self.errors)
            except UnicodeDecodeError, exc:
                if firstline:
                    # Retry on just the cleanly-decodable prefix; only give
                    # up (re-raise) if that prefix holds no complete line.
                    newchars, decodedbytes = self.decode(data[:exc.start], self.errors)
                    lines = newchars.splitlines(True)
                    if len(lines)<=1:
                        raise
                else:
                    raise
            # keep undecoded bytes until the next call
            self.bytebuffer = data[decodedbytes:]
            # put new characters in the character buffer
            self.charbuffer += newchars
            # there was no data available
            if not newdata:
                break
        if chars < 0:
            # Return everything we've got
            result = self.charbuffer
            self.charbuffer = ""
        else:
            # Return the first chars characters
            result = self.charbuffer[:chars]
            self.charbuffer = self.charbuffer[chars:]
        return result

    def readline(self, size=None, keepends=True):
        """ Read one line from the input stream and return the
            decoded data.

            size, if given, is passed as size argument to the
            read() method.
        """
        # If we have lines cached from an earlier read, return
        # them unconditionally
        if self.linebuffer:
            line = self.linebuffer[0]
            del self.linebuffer[0]
            if len(self.linebuffer) == 1:
                # revert to charbuffer mode; we might need more data
                # next time
                self.charbuffer = self.linebuffer[0]
                self.linebuffer = None
            if not keepends:
                line = line.splitlines(False)[0]
            return line

        readsize = size or 72
        line = ""
        # If size is given, we call read() only once
        while True:
            data = self.read(readsize, firstline=True)
            if data:
                # If we're at a "\r" read one extra character (which might
                # be a "\n") to get a proper line ending. If the stream is
                # temporarily exhausted we return the wrong line ending.
                if data.endswith("\r"):
                    data += self.read(size=1, chars=1)

            line += data
            lines = line.splitlines(True)
            if lines:
                if len(lines) > 1:
                    # More than one line result; the first line is a full line
                    # to return
                    line = lines[0]
                    del lines[0]
                    if len(lines) > 1:
                        # cache the remaining lines
                        lines[-1] += self.charbuffer
                        self.linebuffer = lines
                        self.charbuffer = None
                    else:
                        # only one remaining line, put it back into charbuffer
                        self.charbuffer = lines[0] + self.charbuffer
                    if not keepends:
                        line = line.splitlines(False)[0]
                    break
                line0withend = lines[0]
                line0withoutend = lines[0].splitlines(False)[0]
                if line0withend != line0withoutend: # We really have a line end
                    # Put the rest back together and keep it until the next call
                    self.charbuffer = "".join(lines[1:]) + self.charbuffer
                    if keepends:
                        line = line0withend
                    else:
                        line = line0withoutend
                    break
            # we didn't get anything or this was our only try
            if not data or size is not None:
                if line and not keepends:
                    line = line.splitlines(False)[0]
                break
            # No complete line yet: grow the read size (capped growth).
            if readsize<8000:
                readsize *= 2
        return line

    def readlines(self, sizehint=None, keepends=True):
        """ Read all lines available on the input stream
            and return them as list of lines.

            Line breaks are implemented using the codec's decoder
            method and are included in the list entries.

            sizehint, if given, is ignored since there is no efficient
            way to finding the true end-of-line.
        """
        data = self.read()
        return data.splitlines(keepends)

    def reset(self):
        """ Resets the codec buffers used for keeping state.

            Note that no stream repositioning should take place.
            This method is primarily intended to be able to recover
            from decoding errors.
        """
        self.bytebuffer = ""
        # NOTE(review): reset() leaves charbuffer as unicode while __init__
        # starts it as str; read()/readline() handle either.
        self.charbuffer = u""
        self.linebuffer = None

    def seek(self, offset, whence=0):
        """ Set the input stream's current position.

            Resets the codec buffers used for keeping state.
        """
        self.reset()
        self.stream.seek(offset, whence)

    def next(self):
        """ Return the next decoded line from the input stream."""
        line = self.readline()
        if line:
            return line
        raise StopIteration

    def __iter__(self):
        return self

    def __getattr__(self, name,
                    getattr=getattr):
        """ Inherit all other methods from the underlying stream.
        """
        return getattr(self.stream, name)

    def __enter__(self):
        return self

    def __exit__(self, type, value, tb):
        self.stream.close()
###
class StreamReaderWriter:

    """ StreamReaderWriter instances allow wrapping streams which
        work in both read and write modes.

        The design is such that one can use the factory functions
        returned by the codec.lookup() function to construct the
        instance.
    """
    # Optional attributes set by the file wrappers below
    encoding = 'unknown'

    def __init__(self, stream, Reader, Writer, errors='strict'):
        """ Creates a StreamReaderWriter instance.

            stream must be a Stream-like object.

            Reader, Writer must be factory functions or classes
            providing the StreamReader, StreamWriter interface resp.

            Error handling is done in the same way as defined for the
            StreamWriter/Readers.
        """
        self.stream = stream
        # Both wrappers share the same underlying stream.
        self.reader = Reader(stream, errors)
        self.writer = Writer(stream, errors)
        self.errors = errors

    def read(self, size=-1):
        """Delegates to the reader."""
        return self.reader.read(size)

    def readline(self, size=None):
        """Delegates to the reader."""
        return self.reader.readline(size)

    def readlines(self, sizehint=None):
        """Delegates to the reader."""
        return self.reader.readlines(sizehint)

    def next(self):
        """ Return the next decoded line from the input stream."""
        return self.reader.next()

    def __iter__(self):
        return self

    def write(self, data):
        """Delegates to the writer."""
        return self.writer.write(data)

    def writelines(self, list):
        """Delegates to the writer."""
        return self.writer.writelines(list)

    def reset(self):
        # Reset both directions; the reader first, matching the original
        # statement order.
        self.reader.reset()
        self.writer.reset()

    def __getattr__(self, name,
                    getattr=getattr):
        """ Inherit all other methods from the underlying stream.
        """
        return getattr(self.stream, name)

    # these are needed to make "with codecs.open(...)" work properly

    def __enter__(self):
        return self

    def __exit__(self, type, value, tb):
        self.stream.close()
###
class StreamRecoder:

    """ StreamRecoder instances provide a frontend - backend
        view of encoding data.

        They use the complete set of APIs returned by the
        codecs.lookup() function to implement their task.

        Data written to the stream is first decoded into an
        intermediate format (which is dependent on the given codec
        combination) and then written to the stream using an instance
        of the provided Writer class.

        In the other direction, data is read from the stream using a
        Reader instance and then return encoded data to the caller.
    """
    # Optional attributes set by the file wrappers below
    data_encoding = 'unknown'
    file_encoding = 'unknown'

    def __init__(self, stream, encode, decode, Reader, Writer,
                 errors='strict'):
        """ Creates a StreamRecoder instance which implements a two-way
            conversion: encode and decode work on the frontend (the
            input to .read() and output of .write()) while
            Reader and Writer work on the backend (reading and
            writing to the stream).

            You can use these objects to do transparent direct
            recodings from e.g. latin-1 to utf-8 and back.

            stream must be a file-like object.

            encode, decode must adhere to the Codec interface, Reader,
            Writer must be factory functions or classes providing the
            StreamReader, StreamWriter interface resp.

            encode and decode are needed for the frontend translation,
            Reader and Writer for the backend translation. Unicode is
            used as intermediate encoding.

            Error handling is done in the same way as defined for the
            StreamWriter/Readers.
        """
        self.stream = stream
        self.encode = encode
        self.decode = decode
        self.reader = Reader(stream, errors)
        self.writer = Writer(stream, errors)
        self.errors = errors

    def read(self, size=-1):
        # Decode from the stream, then re-encode for the caller.
        data = self.reader.read(size)
        data, bytesencoded = self.encode(data, self.errors)
        return data

    def readline(self, size=None):
        if size is None:
            data = self.reader.readline()
        else:
            data = self.reader.readline(size)
        data, bytesencoded = self.encode(data, self.errors)
        return data

    def readlines(self, sizehint=None):
        # Note: reads and re-encodes the whole remaining stream regardless
        # of sizehint.
        data = self.reader.read()
        data, bytesencoded = self.encode(data, self.errors)
        return data.splitlines(1)

    def next(self):
        """ Return the next decoded line from the input stream."""
        data = self.reader.next()
        data, bytesencoded = self.encode(data, self.errors)
        return data

    def __iter__(self):
        return self

    def write(self, data):
        # Decode caller data to the intermediate format, then let the
        # backend writer encode it onto the stream.
        data, bytesdecoded = self.decode(data, self.errors)
        return self.writer.write(data)

    def writelines(self, list):
        data = ''.join(list)
        data, bytesdecoded = self.decode(data, self.errors)
        return self.writer.write(data)

    def reset(self):
        self.reader.reset()
        self.writer.reset()

    def __getattr__(self, name,
                    getattr=getattr):
        """ Inherit all other methods from the underlying stream.
        """
        return getattr(self.stream, name)

    def __enter__(self):
        return self

    def __exit__(self, type, value, tb):
        self.stream.close()
### Shortcuts
def open(filename, mode='rb', encoding=None, errors='strict', buffering=1):
    """ Open an encoded file using the given mode and return
        a wrapped version providing transparent encoding/decoding.

        Note: The wrapped version will only accept the object format
        defined by the codecs, i.e. Unicode objects for most builtin
        codecs. Output is also codec dependent and will usually be
        Unicode as well.

        Files are always opened in binary mode, even if no binary mode
        was specified, to avoid data loss with encodings that use the
        full 8-bit range. The default file mode is 'rb'.

        encoding specifies the encoding which is to be used for the
        file; errors defines the error handling (default 'strict',
        raising ValueErrors on encoding errors); buffering has the
        same meaning as for the builtin open() API and defaults to
        line buffered.

        The returned wrapped file object provides an extra attribute
        .encoding which allows querying the used encoding. This
        attribute is only available if an encoding was specified as
        parameter.
    """
    if encoding is not None and 'b' not in mode:
        # Force opening of the file in binary mode
        mode += 'b'
    stream = __builtin__.open(filename, mode, buffering)
    if encoding is None:
        return stream
    codec_info = lookup(encoding)
    wrapped = StreamReaderWriter(stream, codec_info.streamreader,
                                 codec_info.streamwriter, errors)
    # Add attributes to simplify introspection
    wrapped.encoding = encoding
    return wrapped
def EncodedFile(file, data_encoding, file_encoding=None, errors='strict'):
    """ Return a wrapped version of file which provides transparent
        encoding translation.

        Strings written to the wrapped file are interpreted according
        to the given data_encoding and then written to the original
        file as string using file_encoding. Strings read from the file
        are decoded with file_encoding and handed back encoded with
        data_encoding. The intermediate encoding will usually be
        Unicode but depends on the specified codecs.

        If file_encoding is not given, it defaults to data_encoding.

        errors may be given to define the error handling. It defaults
        to 'strict' which causes ValueErrors to be raised in case an
        encoding error occurs.

        The returned wrapped file object provides two extra attributes
        .data_encoding and .file_encoding which reflect the given
        parameters of the same name and can be used for introspection.
    """
    if file_encoding is None:
        file_encoding = data_encoding
    frontend = lookup(data_encoding)
    backend = lookup(file_encoding)
    recoder = StreamRecoder(file, frontend.encode, frontend.decode,
                            backend.streamreader, backend.streamwriter,
                            errors)
    # Add attributes to simplify introspection
    recoder.data_encoding = data_encoding
    recoder.file_encoding = file_encoding
    return recoder
### Helpers for codec lookup
def getencoder(encoding):
    """ Look up the codec for the given encoding and return
        its encoder function.

        Raises a LookupError in case the encoding cannot be found.
    """
    codec_info = lookup(encoding)
    return codec_info.encode
def getdecoder(encoding):
    """ Look up the codec for the given encoding and return
        its decoder function.

        Raises a LookupError in case the encoding cannot be found.
    """
    codec_info = lookup(encoding)
    return codec_info.decode
def getincrementalencoder(encoding):
    """ Look up the codec for the given encoding and return
        its IncrementalEncoder class or factory function.

        Raises a LookupError in case the encoding cannot be found
        or the codec doesn't provide an incremental encoder.
    """
    factory = lookup(encoding).incrementalencoder
    if factory is None:
        # The codec exists but has no incremental support.
        raise LookupError(encoding)
    return factory
def getincrementaldecoder(encoding):
    """ Look up the codec for the given encoding and return
        its IncrementalDecoder class or factory function.

        Raises a LookupError in case the encoding cannot be found
        or the codec doesn't provide an incremental decoder.
    """
    factory = lookup(encoding).incrementaldecoder
    if factory is None:
        # The codec exists but has no incremental support.
        raise LookupError(encoding)
    return factory
def getreader(encoding):
    """ Look up the codec for the given encoding and return
        its StreamReader class or factory function.

        Raises a LookupError in case the encoding cannot be found.
    """
    codec_info = lookup(encoding)
    return codec_info.streamreader
def getwriter(encoding):
    """ Look up the codec for the given encoding and return
        its StreamWriter class or factory function.

        Raises a LookupError in case the encoding cannot be found.
    """
    codec_info = lookup(encoding)
    return codec_info.streamwriter
def iterencode(iterator, encoding, errors='strict', **kwargs):
    """
    Encoding iterator.

    Encodes the input strings from the iterator using a IncrementalEncoder.

    errors and kwargs are passed through to the IncrementalEncoder
    constructor.
    """
    encoder = getincrementalencoder(encoding)(errors, **kwargs)
    for chunk in iterator:
        encoded = encoder.encode(chunk)
        if encoded:
            yield encoded
    # Flush any state still held by the incremental encoder.
    tail = encoder.encode("", True)
    if tail:
        yield tail
def iterdecode(iterator, encoding, errors='strict', **kwargs):
    """
    Decoding iterator.

    Decodes the input strings from the iterator using a IncrementalDecoder.

    errors and kwargs are passed through to the IncrementalDecoder
    constructor.
    """
    decoder = getincrementaldecoder(encoding)(errors, **kwargs)
    for chunk in iterator:
        decoded = decoder.decode(chunk)
        if decoded:
            yield decoded
    # Flush any state still held by the incremental decoder.
    tail = decoder.decode("", True)
    if tail:
        yield tail
### Helpers for charmap-based codecs
def make_identity_dict(rng):
    """ make_identity_dict(rng) -> dict

        Return a dictionary where elements of the rng sequence are
        mapped to themselves.
    """
    # Each element serves as both key and value.
    return {element: element for element in rng}
def make_encoding_map(decoding_map):
    """ Creates an encoding map from a decoding map.

        If a target mapping in the decoding map occurs multiple
        times, then that target is mapped to None (undefined mapping),
        causing an exception when encountered by the charmap codec
        during translation.

        One example where this happens is cp875.py which decodes
        multiple character to the code point U+001A.
    """
    encoding_map = {}
    for code, char in decoding_map.items():
        if char in encoding_map:
            # Ambiguous target: mark the reverse mapping as undefined.
            encoding_map[char] = None
        else:
            encoding_map[char] = code
    return encoding_map
### error handlers

try:
    # Pre-fetch the standard error handlers so they are exposed as
    # module attributes (e.g. codecs.strict_errors).
    strict_errors = lookup_error("strict")
    ignore_errors = lookup_error("ignore")
    replace_errors = lookup_error("replace")
    xmlcharrefreplace_errors = lookup_error("xmlcharrefreplace")
    backslashreplace_errors = lookup_error("backslashreplace")
except LookupError:
    # In --disable-unicode builds, these error handler are missing
    strict_errors = None
    ignore_errors = None
    replace_errors = None
    xmlcharrefreplace_errors = None
    backslashreplace_errors = None

# Tell modulefinder that using codecs probably needs the encodings
# package
_false = 0
if _false:
    import encodings

### Tests

if __name__ == '__main__':

    # Make stdout translate Latin-1 output into UTF-8 output
    sys.stdout = EncodedFile(sys.stdout, 'latin-1', 'utf-8')

    # Have stdin translate Latin-1 input into UTF-8 input
    sys.stdin = EncodedFile(sys.stdin, 'utf-8', 'latin-1')
|
huanpc/IoT-1 | refs/heads/master | gui/controller/.venv/lib/python3.5/site-packages/django/conf/locale/zh_Hant/formats.py | 1008 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'Y年n月j日' # 2016年9月5日
TIME_FORMAT = 'H:i' # 20:45
DATETIME_FORMAT = 'Y年n月j日 H:i' # 2016年9月5日 20:45
YEAR_MONTH_FORMAT = 'Y年n月' # 2016年9月
MONTH_DAY_FORMAT = 'm月j日' # 9月5日
SHORT_DATE_FORMAT = 'Y年n月j日' # 2016年9月5日
SHORT_DATETIME_FORMAT = 'Y年n月j日 H:i' # 2016年9月5日 20:45
FIRST_DAY_OF_WEEK = 1 # 星期一 (Monday)
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%Y/%m/%d', # '2016/09/05'
'%Y-%m-%d', # '2016-09-05'
'%Y年%n月%j日', # '2016年9月5日'
]
TIME_INPUT_FORMATS = [
'%H:%M', # '20:45'
'%H:%M:%S', # '20:45:29'
'%H:%M:%S.%f', # '20:45:29.000200'
]
DATETIME_INPUT_FORMATS = [
'%Y/%m/%d %H:%M', # '2016/09/05 20:45'
'%Y-%m-%d %H:%M', # '2016-09-05 20:45'
'%Y年%n月%j日 %H:%M', # '2016年9月5日 14:45'
'%Y/%m/%d %H:%M:%S', # '2016/09/05 20:45:29'
'%Y-%m-%d %H:%M:%S', # '2016-09-05 20:45:29'
'%Y年%n月%j日 %H:%M:%S', # '2016年9月5日 20:45:29'
'%Y/%m/%d %H:%M:%S.%f', # '2016/09/05 20:45:29.000200'
'%Y-%m-%d %H:%M:%S.%f', # '2016-09-05 20:45:29.000200'
'%Y年%n月%j日 %H:%n:%S.%f', # '2016年9月5日 20:45:29.000200'
]
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ''
NUMBER_GROUPING = 4
|
nicolas998/Op_Radar | refs/heads/master | 06_Codigos/Genera_Mapa_Slides.py | 1 | #!/usr/bin/env python
import argparse
import textwrap
import numpy as np
import os
from wmf import wmf
import pylab as pl
#Parametros de entrada del trazador
parser=argparse.ArgumentParser(
prog='Genera_Grafica_Qsim',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent('''\
Genera la figura de caudales simulados para un periodo asignado de tiempo, de forma
adicional presenta el hietograma de precipitacion.
'''))
#Parametros obligatorios
parser.add_argument("cuenca",help="Cuenca con la estructura que hace todo")
parser.add_argument("slides",help="Ruta a la carpeta con binarios donde estan los slides")
parser.add_argument("ruta",help="Ruta donde se guarda la figura con slides")
parser.add_argument("-c", "--coord",help="Escribe archivo con coordenadas", default = False, type = bool)
#lee todos los argumentos
args=parser.parse_args()
#-------------------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------------------
#Lectura de cuenc ay variables
cu = wmf.SimuBasin(rute=args.cuenca, SimSlides = True)
wmf.models.slide_allocate(cu.ncells, 10)
R = np.copy(wmf.models.sl_riskvector)
R[R == 2] = 1
#-------------------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------------------
#Configura rutas
if args.ruta.endswith('.png'):
ruta_texto = args.ruta[:-4] + '.txt'
ruta_plot = args.ruta
else:
ruta_texto = args.ruta + '.txt'
ruta_plot = args.ruta + '.png'
L = os.listdir(args.slides)
for l in L:
#ruta de la imagen
rutaImagen = args.ruta + l + '.png'
#plot
s, r = wmf.models.read_int_basin(args.slides + l,1,cu.ncells)
r2 = R + s
Coord = cu.Plot_basinClean(r2, rutaImagen, cmap = pl.get_cmap('viridis',3), figsize = (30,15))
#Guarda archuivo con coordenadas
if args.coord:
f = open(ruta_texto, 'w')
for t,i in zip(['Left', 'Right', 'Bottom', 'Top'], Coord):
f.write('%s, \t %.4f \n' % (t,i))
f.close()
|
maartenq/ansible | refs/heads/devel | lib/ansible/modules/storage/purestorage/purefa_volume.py | 24 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, Simon Dodsley (simon@purestorage.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: purefa_volume
version_added: '2.4'
short_description: Manage volumes on Pure Storage FlashArrays
description:
- Create, delete or extend the capacity of a volume on Pure Storage FlashArray.
author:
- Simon Dodsley (@sdodsley)
options:
name:
description:
- The name of the volume.
required: true
target:
description:
- The name of the target volume, if copying.
state:
description:
- Define whether the volume should exist or not.
default: present
choices: [ absent, present ]
eradicate:
description:
- Define whether to eradicate the volume on delete or leave in trash.
type: bool
default: 'no'
overwrite:
description:
- Define whether to overwrite a target volume if it already exisits.
type: bool
default: 'no'
size:
description:
- Volume size in M, G, T or P units.
extends_documentation_fragment:
- purestorage.fa
'''
EXAMPLES = r'''
- name: Create new volume named foo
purefa_volume:
name: foo
size: 1T
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
state: present
- name: Extend the size of an existing volume named foo
purefa_volume:
name: foo
size: 2T
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
state: present
- name: Delete and eradicate volume named foo
purefa_volume:
name: foo
eradicate: yes
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
state: absent
- name: Create clone of volume bar named foo
purefa_volume:
name: foo
target: bar
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
state: present
- name: Overwrite volume bar with volume foo
purefa_volume:
name: foo
target: bar
overwrite: yes
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
state: present
'''
RETURN = r'''
'''
# The purestorage SDK is optional at import time; availability is
# re-checked in main() so a clean Ansible failure can be reported
# instead of an ImportError traceback.
try:
    from purestorage import purestorage
    HAS_PURESTORAGE = True
except ImportError:
    HAS_PURESTORAGE = False

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pure import get_system, purefa_argument_spec
def human_to_bytes(size):
    """Given a human-readable byte string (e.g. 2G, 30M),
    return the number of bytes.  Will return 0 if the argument has
    unexpected form.
    """
    # Binary (1024-based) multipliers keyed by unit suffix; only the
    # units accepted by the original implementation are supported.
    multipliers = {
        'P': 1125899906842624,
        'T': 1099511627776,
        'G': 1073741824,
        'M': 1048576,
    }
    # Robustness fix: an empty string previously raised IndexError on
    # size[-1]; treat it as "unexpected form" per the docstring.
    if not size:
        return 0
    digits, unit = size[:-1], size[-1]
    # Only plain non-negative integers are accepted; floats, signs and
    # unknown unit suffixes all yield 0 (matching prior behavior).
    if not digits.isdigit() or unit not in multipliers:
        return 0
    return int(digits) * multipliers[unit]
def get_volume(module, array):
    """Return Volume or None"""
    try:
        return array.get_volume(module.params['name'])
    except Exception:
        # Idiom fix: was a bare except (which would also swallow
        # SystemExit/KeyboardInterrupt). The SDK raises when the volume
        # does not exist; report that as None.
        return None
def get_target(module, array):
    """Return Volume or None"""
    try:
        return array.get_volume(module.params['target'])
    except Exception:
        # Idiom fix: was a bare except (which would also swallow
        # SystemExit/KeyboardInterrupt). The SDK raises when the target
        # volume does not exist; report that as None.
        return None
def create_volume(module, array):
    """Create Volume.

    Reports changed=True unless the SDK call fails; in check mode the
    volume is not created but changed=True is still reported.
    """
    changed = True
    if not module.check_mode:
        try:
            array.create_volume(module.params['name'], module.params['size'])
        except Exception:
            # Idiom fix: was a bare except. A failed create is reported
            # as changed=False (matching the original behavior).
            changed = False
    module.exit_json(changed=changed)
def copy_from_volume(module, array):
    """Create Volume Clone.

    Copies the source volume onto the target; an existing target is
    only overwritten when the overwrite option is set, otherwise the
    task reports changed=False.
    """
    changed = False
    destination = get_target(module, array)
    overwrite = module.params['overwrite']
    if destination is None or overwrite:
        changed = True
        if not module.check_mode:
            if destination is None:
                array.copy_volume(module.params['name'],
                                  module.params['target'])
            else:
                array.copy_volume(module.params['name'],
                                  module.params['target'],
                                  overwrite=overwrite)
    module.exit_json(changed=changed)
def update_volume(module, array):
    """Update Volume.

    Extends the volume when the requested size exceeds the current
    size; shrinking is never attempted and reports changed=False.
    """
    current = array.get_volume(module.params['name'])
    needs_extend = human_to_bytes(module.params['size']) > current['size']
    if needs_extend and not module.check_mode:
        array.extend_volume(module.params['name'], module.params['size'])
    module.exit_json(changed=needs_extend)
def delete_volume(module, array):
    """Delete Volume.

    Destroys the volume and, when the eradicate option is set, also
    eradicates it from the trash. Reports changed=False if either SDK
    call fails.
    """
    changed = True
    if not module.check_mode:
        try:
            array.destroy_volume(module.params['name'])
            if module.params['eradicate']:
                try:
                    array.eradicate_volume(module.params['name'])
                except Exception:
                    # Idiom fix: was a bare except.
                    changed = False
        except Exception:
            changed = False
    # Bug fix: previously exit_json(changed=True) was emitted
    # unconditionally, masking failed destroy/eradicate calls.
    module.exit_json(changed=changed)
def main():
    """Module entry point: build the argument spec, verify the SDK is
    available, then dispatch to the create/update/copy/delete helper
    that matches the requested state and the array's current contents.
    """
    argument_spec = purefa_argument_spec()
    argument_spec.update(dict(
        name=dict(type='str', required=True),
        target=dict(type='str'),
        overwrite=dict(type='bool', default=False),
        eradicate=dict(type='bool', default=False),
        state=dict(type='str', default='present', choices=['absent', 'present']),
        size=dict(type='str'),
    ))

    # size (create/resize) and target (clone) select alternative modes.
    mutually_exclusive = [['size', 'target']]

    module = AnsibleModule(argument_spec,
                           mutually_exclusive=mutually_exclusive,
                           supports_check_mode=True)

    if not HAS_PURESTORAGE:
        module.fail_json(msg='purestorage sdk is required for this module in volume')

    size = module.params['size']
    state = module.params['state']
    array = get_system(module)
    volume = get_volume(module, array)
    target = get_target(module, array)

    if state == 'present' and not volume and size:
        create_volume(module, array)
    elif state == 'present' and volume and size:
        update_volume(module, array)
    # NOTE(review): the next two branches both call copy_from_volume,
    # making the target / not-target distinction redundant; with no
    # target set, copy_from_volume would look up a None volume name —
    # confirm the intended behavior of this dispatch.
    elif state == 'present' and volume and target:
        copy_from_volume(module, array)
    elif state == 'present' and volume and not target:
        copy_from_volume(module, array)
    elif state == 'absent' and volume:
        delete_volume(module, array)
    # NOTE(review): 'and' binds tighter than 'or', so this condition
    # reads as (state == 'present' and not volume) or (not size); it is
    # only reached after all branches above, where it acts as a no-op
    # report.
    elif state == 'present' and not volume or not size:
        module.exit_json(changed=False)
    elif state == 'absent' and not volume:
        module.exit_json(changed=False)


if __name__ == '__main__':
    main()
|
meganchang/Stem | refs/heads/master | test/unit/descriptor/extrainfo_descriptor.py | 3 | """
Unit tests for stem.descriptor.extrainfo_descriptor.
"""
import datetime
import unittest
from stem.descriptor.extrainfo_descriptor import ExtraInfoDescriptor, DirResponses, DirStats
# Signature blob spliced into the router-signature line of the
# descriptors built for these tests (see EXTRAINFO_DESCRIPTOR_ATTR).
CRYPTO_BLOB = """
K5FSywk7qvw/boA4DQcqkls6Ize5vcBYfhQ8JnOeRQC9+uDxbnpm3qaYN9jZ8myj
k0d2aofcVbHr4fPQOSST0LXDrhFl5Fqo5um296zpJGvRUeO6S44U/EfJAGShtqWw
7LZqklu+gVvhMKREpchVqlAwXkWR44VENm24Hs+mT3M=
"""

# Mandatory keyword/value pairs, in order, of a minimal extra-info
# descriptor. "router-signature" must stay last: extra attributes are
# inserted just before it when descriptors are generated.
EXTRAINFO_DESCRIPTOR_ATTR = (
  ("extra-info", "ninja B2289C3EAB83ECD6EB916A2F481A02E6B76A0A48"),
  ("published", "2012-05-05 17:03:50"),
  ("router-signature", "\n-----BEGIN SIGNATURE-----%s-----END SIGNATURE-----" % CRYPTO_BLOB),
)
def _make_descriptor(attr = None, exclude = None):
  """
  Constructs a minimal extrainfo descriptor with the given attributes.

  Arguments:
    attr (dict) - keyword/value mappings to be included in the descriptor
    exclude (list) - mandatory keywords to exclude from the descriptor

  Returns:
    str with customized descriptor content
  """

  remaining = dict(attr) if attr else {}  # shallow copy since we're destructive
  excluded = exclude if exclude else []
  lines = []

  for keyword, default_value in EXTRAINFO_DESCRIPTOR_ATTR:
    if keyword in excluded:
      continue

    value = remaining.pop(keyword, default_value)

    # if this is the last entry then we should dump in any unused attributes
    if keyword == "router-signature":
      for extra_keyword, extra_value in remaining.items():
        lines.append("%s %s" % (extra_keyword, extra_value))

    lines.append("%s %s" % (keyword, value))

  return "\n".join(lines)
class TestExtraInfoDescriptor(unittest.TestCase):
def test_minimal_extrainfo_descriptor(self):
"""
Basic sanity check that we can parse an extrainfo descriptor with minimal
attributes.
"""
desc_text = _make_descriptor()
desc = ExtraInfoDescriptor(desc_text)
self.assertEquals("ninja", desc.nickname)
self.assertEquals("B2289C3EAB83ECD6EB916A2F481A02E6B76A0A48", desc.fingerprint)
self.assertTrue(CRYPTO_BLOB in desc.signature)
def test_unrecognized_line(self):
"""
Includes unrecognized content in the descriptor.
"""
desc_text = _make_descriptor({"pepperjack": "is oh so tasty!"})
desc = ExtraInfoDescriptor(desc_text)
self.assertEquals(["pepperjack is oh so tasty!"], desc.get_unrecognized_lines())
def test_proceeding_line(self):
"""
Includes a line prior to the 'extra-info' entry.
"""
desc_text = "exit-streams-opened port=80\n" + _make_descriptor()
self._expect_invalid_attr(desc_text)
def test_trailing_line(self):
"""
Includes a line after the 'router-signature' entry.
"""
desc_text = _make_descriptor() + "\nexit-streams-opened port=80"
self._expect_invalid_attr(desc_text)
def test_extrainfo_line_missing_fields(self):
"""
Checks that validation catches when the extra-info line is missing fields
and that without validation both the nickname and fingerprint are left as
None.
"""
test_entries = (
"ninja",
"ninja ",
"B2289C3EAB83ECD6EB916A2F481A02E6B76A0A48",
" B2289C3EAB83ECD6EB916A2F481A02E6B76A0A48",
)
for entry in test_entries:
desc_text = _make_descriptor({"extra-info": entry})
desc = self._expect_invalid_attr(desc_text, "nickname")
self.assertEquals(None, desc.nickname)
self.assertEquals(None, desc.fingerprint)
def test_geoip_db_digest(self):
"""
Parses the geoip-db-digest line with valid and invalid data.
"""
geoip_db_digest = "916A3CA8B7DF61473D5AE5B21711F35F301CE9E8"
desc_text = _make_descriptor({"geoip-db-digest": geoip_db_digest})
desc = ExtraInfoDescriptor(desc_text)
self.assertEquals(geoip_db_digest, desc.geoip_db_digest)
test_entries = (
"",
"916A3CA8B7DF61473D5AE5B21711F35F301CE9E",
"916A3CA8B7DF61473D5AE5B21711F35F301CE9E88",
"916A3CA8B7DF61473D5AE5B21711F35F301CE9EG",
"916A3CA8B7DF61473D5AE5B21711F35F301CE9E-",
)
for entry in test_entries:
desc_text = _make_descriptor({"geoip-db-digest": entry})
self._expect_invalid_attr(desc_text, "geoip_db_digest", entry)
def test_cell_circuits_per_decile(self):
"""
Parses the cell-circuits-per-decile line with valid and invalid data.
"""
test_entries = (
("0", 0),
("11", 11),
)
for entry in ("0", "11", "25"):
desc_text = _make_descriptor({"cell-circuits-per-decile": entry})
desc = ExtraInfoDescriptor(desc_text)
self.assertEquals(int(entry), desc.cell_circuits_per_decile)
test_entries = (
"",
" ",
"-5",
"blarg",
)
for entry in test_entries:
desc_text = _make_descriptor({"cell-circuits-per-decile": entry})
self._expect_invalid_attr(desc_text, "cell_circuits_per_decile")
def test_dir_response_lines(self):
"""
Parses the dirreq-v2-resp and dirreq-v3-resp lines with valid and invalid
data.
"""
for keyword in ("dirreq-v2-resp", "dirreq-v3-resp"):
attr = keyword.replace('-', '_').replace('dirreq', 'dir').replace('resp', 'responses')
unknown_attr = attr + "_unknown"
test_value = "ok=0,unavailable=0,not-found=984,not-modified=0,something-new=7"
desc_text = _make_descriptor({keyword: test_value})
desc = ExtraInfoDescriptor(desc_text)
self.assertEquals(0, getattr(desc, attr)[DirResponses.OK])
self.assertEquals(0, getattr(desc, attr)[DirResponses.UNAVAILABLE])
self.assertEquals(984, getattr(desc, attr)[DirResponses.NOT_FOUND])
self.assertEquals(0, getattr(desc, attr)[DirResponses.NOT_MODIFIED])
self.assertEquals(7, getattr(desc, unknown_attr)["something-new"])
test_entries = (
"ok=-4",
"ok:4",
"ok=4.not-found=3",
)
for entry in test_entries:
desc_text = _make_descriptor({keyword: entry})
desc = self._expect_invalid_attr(desc_text)
self.assertEqual({}, getattr(desc, attr))
self.assertEqual({}, getattr(desc, unknown_attr))
def test_dir_stat_lines(self):
"""
Parses the dirreq-v2-direct-dl, dirreq-v3-direct-dl, dirreq-v2-tunneled-dl,
and dirreq-v3-tunneled-dl lines with valid and invalid data.
"""
for keyword in ("dirreq-v2-direct-dl", "dirreq-v2-direct-dl", "dirreq-v2-tunneled-dl", "dirreq-v2-tunneled-dl"):
attr = keyword.replace('-', '_').replace('dirreq', 'dir')
unknown_attr = attr + "_unknown"
test_value = "complete=2712,timeout=32,running=4,min=741,d1=14507,d2=22702,q1=28881,d3=38277,d4=73729,md=111455,d6=168231,d7=257218,q3=319833,d8=390507,d9=616301,something-new=11,max=29917857"
desc_text = _make_descriptor({keyword: test_value})
desc = ExtraInfoDescriptor(desc_text)
self.assertEquals(2712, getattr(desc, attr)[DirStats.COMPLETE])
self.assertEquals(32, getattr(desc, attr)[DirStats.TIMEOUT])
self.assertEquals(4, getattr(desc, attr)[DirStats.RUNNING])
self.assertEquals(741, getattr(desc, attr)[DirStats.MIN])
self.assertEquals(14507, getattr(desc, attr)[DirStats.D1])
self.assertEquals(22702, getattr(desc, attr)[DirStats.D2])
self.assertEquals(28881, getattr(desc, attr)[DirStats.Q1])
self.assertEquals(38277, getattr(desc, attr)[DirStats.D3])
self.assertEquals(73729, getattr(desc, attr)[DirStats.D4])
self.assertEquals(111455, getattr(desc, attr)[DirStats.MD])
self.assertEquals(168231, getattr(desc, attr)[DirStats.D6])
self.assertEquals(257218, getattr(desc, attr)[DirStats.D7])
self.assertEquals(319833, getattr(desc, attr)[DirStats.Q3])
self.assertEquals(390507, getattr(desc, attr)[DirStats.D8])
self.assertEquals(616301, getattr(desc, attr)[DirStats.D9])
self.assertEquals(29917857, getattr(desc, attr)[DirStats.MAX])
self.assertEquals(11, getattr(desc, unknown_attr)["something-new"])
test_entries = (
"complete=-4",
"complete:4",
"complete=4.timeout=3",
)
for entry in test_entries:
desc_text = _make_descriptor({keyword: entry})
desc = self._expect_invalid_attr(desc_text)
self.assertEqual({}, getattr(desc, attr))
self.assertEqual({}, getattr(desc, unknown_attr))
def test_conn_bi_direct(self):
"""
Parses the conn-bi-direct line with valid and invalid data.
"""
desc_text = _make_descriptor({"conn-bi-direct": "2012-05-03 12:07:50 (500 s) 277431,12089,0,2134"})
desc = ExtraInfoDescriptor(desc_text)
self.assertEquals(datetime.datetime(2012, 5, 3, 12, 7, 50), desc.conn_bi_direct_end)
self.assertEquals(500, desc.conn_bi_direct_interval)
self.assertEquals(277431, desc.conn_bi_direct_below)
self.assertEquals(12089, desc.conn_bi_direct_read)
self.assertEquals(0, desc.conn_bi_direct_write)
self.assertEquals(2134, desc.conn_bi_direct_both)
test_entries = (
"",
"2012-05-03 ",
"2012-05-03",
"2012-05-03 12:07:60 (500 s)",
"2012-05-03 12:07:50 (500s)",
"2012-05-03 12:07:50 (500 s",
"2012-05-03 12:07:50 (500 )",
"2012-05-03 12:07:50 (500 s)11",
"2012-05-03 12:07:50 (500 s) 277431,12089,0",
"2012-05-03 12:07:50 (500 s) 277431,12089,0a,2134",
"2012-05-03 12:07:50 (500 s) -277431,12089,0,2134",
)
for entry in test_entries:
desc_text = _make_descriptor({"conn-bi-direct": entry})
desc = self._expect_invalid_attr(desc_text)
self.assertEquals(None, desc.conn_bi_direct_end)
self.assertEquals(None, desc.conn_bi_direct_interval)
self.assertEquals(None, desc.conn_bi_direct_below)
self.assertEquals(None, desc.conn_bi_direct_read)
self.assertEquals(None, desc.conn_bi_direct_write)
self.assertEquals(None, desc.conn_bi_direct_both)
def test_percentage_lines(self):
"""
Uses valid and invalid data to tests lines of the form...
"<keyword>" num%
"""
for keyword in ('dirreq-v2-share', 'dirreq-v3-share'):
attr = keyword.replace('-', '_').replace('dirreq', 'dir')
test_entries = (
("0.00%", 0.0),
("0.01%", 0.0001),
("50%", 0.5),
("100.0%", 1.0),
)
for test_value, expected_value in test_entries:
desc_text = _make_descriptor({keyword: test_value})
desc = ExtraInfoDescriptor(desc_text)
self.assertEquals(expected_value, getattr(desc, attr))
test_entries = (
("", None),
(" ", None),
("100", None),
("100.1%", 1.001),
("-5%", -0.05),
)
for entry, expected in test_entries:
desc_text = _make_descriptor({keyword: entry})
self._expect_invalid_attr(desc_text, attr, expected)
def test_number_list_lines(self):
"""
Uses valid and invalid data to tests lines of the form...
"<keyword>" num,...,num
"""
for keyword in ('cell-processed-cells', 'cell-queued-cells', 'cell-time-in-queue'):
attr = keyword.replace('-', '_')
test_entries = (
("", []),
(" ", []),
("0,0,0", [0.0, 0.0, 0.0]),
("2.3,-4.6,8.9,16.12,32.15", [2.3, -4.6, 8.9, 16.12, 32.15]),
)
for test_value, expected_value in test_entries:
desc_text = _make_descriptor({keyword: test_value})
desc = ExtraInfoDescriptor(desc_text)
self.assertEquals(expected_value, getattr(desc, attr))
test_entries = (
(",,11", [11.0]),
("abc,5.7,def", [5.7]),
("blarg", []),
)
for entry, expected in test_entries:
desc_text = _make_descriptor({keyword: entry})
self._expect_invalid_attr(desc_text, attr, expected)
def test_timestamp_lines(self):
"""
Uses valid and invalid data to tests lines of the form...
"<keyword>" YYYY-MM-DD HH:MM:SS
"""
for keyword in ('published', 'geoip-start-time'):
attr = keyword.replace('-', '_')
desc_text = _make_descriptor({keyword: "2012-05-03 12:07:50"})
desc = ExtraInfoDescriptor(desc_text)
self.assertEquals(datetime.datetime(2012, 5, 3, 12, 7, 50), getattr(desc, attr))
test_entries = (
"",
"2012-05-03 12:07:60",
"2012-05-03 ",
"2012-05-03",
)
for entry in test_entries:
desc_text = _make_descriptor({keyword: entry})
self._expect_invalid_attr(desc_text, attr)
def test_timestamp_and_interval_lines(self):
"""
Uses valid and invalid data to tests lines of the form...
"<keyword>" YYYY-MM-DD HH:MM:SS (NSEC s)
"""
for keyword in ('cell-stats-end', 'entry-stats-end', 'exit-stats-end', 'bridge-stats-end', 'dirreq-stats-end'):
end_attr = keyword.replace('-', '_').replace('dirreq', 'dir')
interval_attr = end_attr[:-4] + "_interval"
desc_text = _make_descriptor({keyword: "2012-05-03 12:07:50 (500 s)"})
desc = ExtraInfoDescriptor(desc_text)
self.assertEquals(datetime.datetime(2012, 5, 3, 12, 7, 50), getattr(desc, end_attr))
self.assertEquals(500, getattr(desc, interval_attr))
test_entries = (
"",
"2012-05-03 ",
"2012-05-03",
"2012-05-03 12:07:60 (500 s)",
"2012-05-03 12:07:50 (500s)",
"2012-05-03 12:07:50 (500 s",
"2012-05-03 12:07:50 (500 )",
)
for entry in test_entries:
desc_text = _make_descriptor({keyword: entry})
desc = self._expect_invalid_attr(desc_text)
self.assertEquals(None, getattr(desc, end_attr))
self.assertEquals(None, getattr(desc, interval_attr))
def test_timestamp_interval_and_value_lines(self):
"""
Uses valid and invalid data to tests lines of the form...
"<keyword>" YYYY-MM-DD HH:MM:SS (NSEC s) NUM,NUM,NUM,NUM,NUM...
"""
for keyword in ('read-history', 'write-history', 'dirreq-read-history', 'dirreq-write-history'):
base_attr = keyword.replace('-', '_').replace('dirreq', 'dir')
end_attr = base_attr + "_end"
interval_attr = base_attr + "_interval"
values_attr = base_attr + "_values"
test_entries = (
("", []),
(" ", []),
(" 50,11,5", [50, 11, 5]),
)
for test_values, expected_values in test_entries:
desc_text = _make_descriptor({keyword: "2012-05-03 12:07:50 (500 s)%s" % test_values})
desc = ExtraInfoDescriptor(desc_text)
self.assertEquals(datetime.datetime(2012, 5, 3, 12, 7, 50), getattr(desc, end_attr))
self.assertEquals(500, getattr(desc, interval_attr))
self.assertEquals(expected_values, getattr(desc, values_attr))
test_entries = (
"",
"2012-05-03 ",
"2012-05-03",
"2012-05-03 12:07:60 (500 s)",
"2012-05-03 12:07:50 (500s)",
"2012-05-03 12:07:50 (500 s",
"2012-05-03 12:07:50 (500 )",
"2012-05-03 12:07:50 (500 s)11",
)
for entry in test_entries:
desc_text = _make_descriptor({keyword: entry})
desc = self._expect_invalid_attr(desc_text)
self.assertEquals(None, getattr(desc, end_attr))
self.assertEquals(None, getattr(desc, interval_attr))
self.assertEquals(None, getattr(desc, values_attr))
def test_port_mapping_lines(self):
"""
Uses valid and invalid data to tests lines of the form...
"<keyword>" port=N,port=N,...
"""
for keyword in ('exit-kibibytes-written', 'exit-kibibytes-read', 'exit-streams-opened'):
attr = keyword.replace('-', '_')
test_entries = (
("", {}),
("443=100,other=111", {443: 100, 'other': 111}),
("80=115533759,443=1777,995=690", {80: 115533759, 443: 1777, 995: 690}),
)
for test_value, expected_value in test_entries:
desc_text = _make_descriptor({keyword: test_value})
desc = ExtraInfoDescriptor(desc_text)
self.assertEquals(expected_value, getattr(desc, attr))
test_entries = (
"8000000=115533759",
"-80=115533759",
"80=-115533759",
"=115533759",
"80=",
"80,115533759",
)
for entry in test_entries:
desc_text = _make_descriptor({keyword: entry})
self._expect_invalid_attr(desc_text, attr, {})
def test_locale_mapping_lines(self):
"""
Uses valid and invalid data to tests lines of the form...
"<keyword>" CC=N,CC=N,...
"""
for keyword in ('dirreq-v2-ips', 'dirreq-v3-ips', 'dirreq-v2-reqs', 'dirreq-v3-reqs', 'geoip-client-origins', 'entry-ips', 'bridge-ips'):
attr = keyword.replace('-', '_').replace('dirreq', 'dir').replace('reqs', 'requests')
test_entries = (
("", {}),
("uk=5,de=3,jp=2", {'uk': 5, 'de': 3, 'jp': 2}),
)
for test_value, expected_value in test_entries:
desc_text = _make_descriptor({keyword: test_value})
desc = ExtraInfoDescriptor(desc_text)
self.assertEquals(expected_value, getattr(desc, attr))
test_entries = (
"uk=-4",
"uki=4",
"uk:4",
"uk=4.de=3",
)
for entry in test_entries:
desc_text = _make_descriptor({keyword: entry})
self._expect_invalid_attr(desc_text, attr, {})
def _expect_invalid_attr(self, desc_text, attr = None, expected_value = None):
    """
    Asserts that construction will fail due to desc_text having a malformed
    attribute. If an attr is provided then we check that it matches an expected
    value when we're constructed without validation.
    """

    # validated construction must reject the malformed descriptor outright
    self.assertRaises(ValueError, ExtraInfoDescriptor, desc_text)

    # unvalidated construction should succeed, with the bad field defaulted
    desc = ExtraInfoDescriptor(desc_text, validate = False)

    if not attr:
        # no attribute given - spot check a default attribute instead
        self.assertEquals("ninja", desc.nickname)
    else:
        # the invalid attribute should hold the expected fallback value
        self.assertEquals(expected_value, getattr(desc, attr))

    return desc
|
thnee/ansible | refs/heads/devel | lib/ansible/modules/network/iosxr/iosxr_lacp.py | 20 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#############################################
# WARNING #
#############################################
#
# This file is auto generated by the resource
# module builder playbook.
#
# Do not edit this file manually.
#
# Changes to this file will be over written
# by the resource module builder.
#
# Changes should be made in the model used to
# generate this file or in the resource module
# builder template.
#
#############################################
"""
The module file for iosxr_lacp
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'
}
DOCUMENTATION = """
---
module: iosxr_lacp
version_added: 2.9
short_description: Manage Global Link Aggregation Control Protocol (LACP) on IOS-XR devices.
description:
- This module manages Global Link Aggregation Control Protocol (LACP) on IOS-XR devices.
notes:
- Tested against IOS-XR 6.1.3.
- This module works with connection C(network_cli). See L(the IOS-XR Platform Options,../network/user_guide/platform_iosxr.html).
author: Nilashish Chakraborty (@nilashishc)
options:
config:
description: The provided configurations.
type: dict
suboptions:
system:
description: This option sets the default system parameters for LACP bundles.
type: dict
suboptions:
priority:
description:
- The system priority to use in LACP negotiations.
- Lower value is higher priority.
- Refer to vendor documentation for valid values.
type: int
mac:
type: dict
description:
- The system MAC related configuration for LACP.
suboptions:
address:
description:
- The system ID to use in LACP negotiations.
type: str
state:
description:
- The state of the configuration after module completion.
type: str
choices:
- merged
- replaced
- deleted
default: merged
"""
EXAMPLES = """
# Using merged
#
#
# ------------
# Before state
# ------------
#
#
# RP/0/0/CPU0:iosxr01#show running-config lacp
# Tue Jul 16 17:46:08.147 UTC
# % No such configuration item(s)
#
#
- name: Merge provided configuration with device configuration
iosxr_lacp:
config:
system:
priority: 10
mac:
address: 00c1.4c00.bd15
state: merged
#
#
# -----------------------
# Module Execution Result
# -----------------------
#
# "before": {}
#
#
# "commands": [
# "lacp system priority 10",
# "lacp system mac 00c1.4c00.bd15"
# ]
#
#
# "after": {
# "system": {
# "mac": {
# "address": "00c1.4c00.bd15"
# },
# "priority": 10
# }
# }
#
# -----------
# After state
# -----------
#
#
# RP/0/0/CPU0:iosxr01#sh run lacp
# Tue Jul 16 17:51:29.365 UTC
# lacp system mac 00c1.4c00.bd15
# lacp system priority 10
#
#
# Using replaced
#
#
# -------------
# Before state
# -------------
#
#
# RP/0/0/CPU0:iosxr01#sh run lacp
# Tue Jul 16 17:53:59.904 UTC
# lacp system mac 00c1.4c00.bd15
# lacp system priority 10
#
- name: Replace device global lacp configuration with the given configuration
iosxr_lacp:
config:
system:
priority: 11
state: replaced
#
#
# -----------------------
# Module Execution Result
# -----------------------
# "before": {
# "system": {
# "mac": {
# "address": "00c1.4c00.bd15"
# },
# "priority": 10
# }
# }
#
#
# "commands": [
# "no lacp system mac",
# "lacp system priority 11"
# ]
#
#
# "after": {
# "system": {
# "priority": 11
# }
# }
#
# -----------
# After state
# -----------
#
#
# RP/0/0/CPU0:iosxr01#sh run lacp
# Tue Jul 16 18:02:40.379 UTC
# lacp system priority 11
#
#
# Using deleted
#
#
# ------------
# Before state
# ------------
#
#
# RP/0/0/CPU0:iosxr01#sh run lacp
# Tue Jul 16 18:37:09.727 UTC
# lacp system mac 00c1.4c00.bd15
# lacp system priority 11
#
#
- name: Delete global LACP configurations from the device
iosxr_lacp:
state: deleted
#
#
# -----------------------
# Module Execution Result
# -----------------------
# "before": {
# "system": {
# "mac": {
# "address": "00c1.4c00.bd15"
# },
# "priority": 11
# }
# }
#
#
# "commands": [
# "no lacp system mac",
# "no lacp system priority"
# ]
#
#
# "after": {}
#
# ------------
# After state
# ------------
#
#
# RP/0/0/CPU0:iosxr01#sh run lacp
# Tue Jul 16 18:39:44.116 UTC
# % No such configuration item(s)
#
#
"""
RETURN = """
before:
description: The configuration as structured data prior to module invocation.
returned: always
type: dict
sample: >
The configuration returned will always be in the same format
of the parameters above.
after:
description: The configuration as structured data after module completion.
returned: when changed
type: dict
sample: >
The configuration returned will always be in the same format
of the parameters above.
commands:
description: The set of commands pushed to the remote device.
returned: always
type: list
sample: ['lacp system priority 10', 'lacp system mac 00c1.4c00.bd15']
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.iosxr.argspec.lacp.lacp import LacpArgs
from ansible.module_utils.network.iosxr.config.lacp.lacp import Lacp
def main():
    """
    Main entry point for module execution.

    Builds the AnsibleModule from the generated argument spec, runs the
    Lacp resource module and reports its result back to Ansible.

    :returns: the result from module invocation
    """
    module = AnsibleModule(
        argument_spec=LacpArgs.argument_spec,
        # 'config' is mandatory whenever we merge or replace configuration
        required_if=[
            ('state', 'merged', ('config',)),
            ('state', 'replaced', ('config',)),
        ],
        supports_check_mode=True,
    )

    module.exit_json(**Lacp(module).execute_module())


if __name__ == '__main__':
    main()
|
DefyVentures/edx-platform | refs/heads/master | lms/djangoapps/courseware/features/registration.py | 102 | # pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
from lettuce import world, step
from lettuce.django import django_url
import time
@step('I register for the course "([^"]*)"$')
def i_register_for_the_course(_step, course):
    # Visit the course about page and click through its register link,
    # then confirm we landed on the dashboard.
    course_id = world.scenario_dict['COURSE'].id.to_deprecated_string()
    world.browser.visit(django_url('courses/%s/about' % course_id))
    world.css_click('section.intro a.register')
    assert world.is_css_present('section.container.dashboard')
@step('I register to audit the course$')
def i_register_to_audit_the_course(_step):
    # Open the course about page and begin registration.
    course_id = world.scenario_dict['COURSE'].id.to_deprecated_string()
    world.browser.visit(django_url('courses/%s/about' % course_id))
    world.css_click('section.intro a.register')

    # When the page first loads some animation needs to
    # complete before this button is in a stable location
    world.retry_on_exception(
        lambda: world.browser.find_by_name("honor_mode").click(),
        max_attempts=10,
        ignored_exceptions=AttributeError
    )

    time.sleep(1)
    assert world.is_css_present('section.container.dashboard')
@step(u'I should see an empty dashboard message')
def i_should_see_empty_dashboard(_step):
    # The dashboard shows a placeholder section when no courses are enrolled.
    assert world.is_css_present('section.empty-dashboard-message')
@step(u'I should( NOT)? see the course numbered "([^"]*)" in my dashboard$')
def i_should_see_that_course_in_my_dashboard(_step, doesnt_appear, course):
    # Look for a dashboard link whose href contains the course number;
    # the optional "NOT" capture flips the expectation.
    link_selector = 'section.my-courses a[href*="%s"]' % course
    checker = world.is_css_not_present if doesnt_appear else world.is_css_present
    assert checker(link_selector)
@step(u'I unenroll from the course numbered "([^"]*)"')
def i_unenroll_from_that_course(_step, course):
    # Open the "more actions" dropdown for the first course entry.
    dropdown_selector = '[id*=actions-dropdown-link-0]'
    assert world.is_css_present(dropdown_selector)
    world.css_click(dropdown_selector)

    # Trigger the unenroll action for the matching course number.
    unenroll_selector = (
        'li.actions-item a.action-unenroll'
        '[data-course-number*="{course_number}"][href*=unenroll-modal]'
    ).format(course_number=course)
    assert world.is_css_present(unenroll_selector)
    world.css_click(unenroll_selector)

    # Confirm the unenrollment in the modal dialog.
    confirm_selector = 'section#unenroll-modal input[value="Unenroll"]'
    assert world.is_css_present(confirm_selector)
    world.css_click(confirm_selector)
|
ivaano/zato | refs/heads/master | code/alembic/versions/0011_1500abb1cf3_gh184_bring_up_to_date.py | 7 | """gh184 Bring Alembic migrations up to date
Revision ID: 0011_1500abb1cf3
Revises: 0010_3f03ae0ef253
Create Date: 2014-04-11 09:25:03.206296
"""
# revision identifiers, used by Alembic.
revision = '0011_1500abb1cf3'
down_revision = '0010_3f03ae0ef253'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.schema import CreateSequence, DropSequence
# Zato
from zato.common.odb import model
from zato.common import CLOUD, HTTP_SOAP_SERIALIZATION_TYPE, MISC, PUB_SUB
# ################################################################################################################################
add_col = op.add_column
def alter_column_nullable_false(table_name, column_name, default_value, column_type):
    """Backfill NULLs in a column with a default, then make it NOT NULL."""
    target = sa.sql.table(table_name, sa.sql.column(column_name))

    # Existing rows must have a value before the constraint can be tightened.
    op.execute(target.update().values({column_name: default_value}))
    op.alter_column(
        table_name, column_name, type_=column_type,
        existing_type=column_type, nullable=False)
def upgrade():
    """Apply the gh184 catch-up migration.

    Adds the new security definition subtypes (NTLM, API key, XPath, AWS),
    HTTP/SOAP audit tables with their replace-pattern link tables, pub/sub
    tables, cloud connection tables (OpenStack Swift, AWS S3), two new
    http_soap columns, assorted sequences, and relaxes the cluster ODB
    connection columns to nullable. Statement order matters: sequences are
    created before the tables that use them, and FK targets before FKs.
    """
    # Security definitions must be unique per (cluster, username, sec_type).
    op.create_unique_constraint(
        'sec_base_cluster_id_username_sec_type_key', model.SecurityBase.__tablename__, ['cluster_id', 'username', 'sec_type'])

    # New security subtypes - each joins 1:1 onto a row in sec_base via its PK/FK.
    op.create_table(
        model.NTLM.__tablename__,sa.Column('id', sa.Integer(), sa.ForeignKey('sec_base.id'), primary_key=True)
    )
    op.create_table(
        model.APIKeySecurity.__tablename__,sa.Column('id', sa.Integer(), sa.ForeignKey('sec_base.id'), primary_key=True)
    )
    op.create_table(
        model.XPathSecurity.__tablename__,
        sa.Column('id', sa.Integer(), sa.ForeignKey('sec_base.id'), primary_key=True),
        sa.Column('username_expr', sa.String(200), nullable=False),
        sa.Column('password_expr', sa.String(200), nullable=True),
    )

    # http_soap.serialization_type: add as nullable, backfill the default,
    # then tighten to NOT NULL (two-step so existing rows don't violate it).
    add_col(
        model.HTTPSOAP.__tablename__, sa.Column('serialization_type', sa.String(200), nullable=True))
    alter_column_nullable_false(
        model.HTTPSOAP.__tablename__, 'serialization_type', HTTP_SOAP_SERIALIZATION_TYPE.SUDS.id, sa.String(200))

    # Same two-step approach for http_soap.timeout.
    add_col(
        model.HTTPSOAP.__tablename__, sa.Column('timeout', sa.Integer(), nullable=True))
    alter_column_nullable_false(
        model.HTTPSOAP.__tablename__, 'timeout', MISC.DEFAULT_HTTP_TIMEOUT, sa.Integer())

    op.create_table(
        model.AWSSecurity.__tablename__, sa.Column('id', sa.Integer(), sa.ForeignKey('sec_base.id'), primary_key=True))

    # Sequences for delivery/message models created elsewhere in the schema.
    op.execute(CreateSequence(sa.Sequence('deliv_def_seq')))
    op.execute(CreateSequence(sa.Sequence('deliv_seq')))
    op.execute(CreateSequence(sa.Sequence('deliv_payl_seq')))
    op.execute(CreateSequence(sa.Sequence('msg_ns_seq')))

    # Audit log of HTTP/SOAP requests/responses exchanged over connections.
    op.execute(CreateSequence(sa.Sequence('http_soap_audit_seq')))
    op.create_table(
        model.HTTSOAPAudit.__tablename__,
        sa.Column('id', sa.Integer(), sa.Sequence('http_soap_audit_seq'), primary_key=True),
        sa.Column('name', sa.String(200), nullable=False, index=True),
        sa.Column('cid', sa.String(200), nullable=False, index=True),
        sa.Column('transport', sa.String(200), nullable=False, index=True),
        sa.Column('connection', sa.String(200), nullable=False, index=True),
        sa.Column('req_time', sa.DateTime(), nullable=False),
        sa.Column('resp_time', sa.DateTime(), nullable=True),
        sa.Column('user_token', sa.String(200), nullable=True, index=True),
        sa.Column('invoke_ok', sa.Boolean, nullable=True),
        sa.Column('auth_ok', sa.Boolean, nullable=True),
        sa.Column('remote_addr', sa.String(200), nullable=False, index=True),
        sa.Column('req_headers', sa.LargeBinary(), nullable=True),
        sa.Column('req_payload', sa.LargeBinary(), nullable=True),
        sa.Column('resp_headers', sa.LargeBinary(), nullable=True),
        sa.Column('resp_payload', sa.LargeBinary(), nullable=True),
        sa.Column('cluster_id', sa.Integer(), sa.ForeignKey('cluster.id', ondelete='CASCADE'), nullable=False),
        sa.Column('conn_id', sa.Integer(), sa.ForeignKey('http_soap.id', ondelete='CASCADE'), nullable=False),
    )

    # Link tables mapping connections to audit replace patterns (JSON Pointer / XPath).
    op.execute(CreateSequence(sa.Sequence('htp_sp_ad_rpl_p_jp_seq')))
    op.create_table(
        model.HTTSOAPAuditReplacePatternsJSONPointer.__tablename__,
        sa.Column('id', sa.Integer(), sa.Sequence('htp_sp_ad_rpl_p_jp_seq'), primary_key=True),
        sa.Column('conn_id', sa.Integer, sa.ForeignKey('http_soap.id', ondelete='CASCADE'), nullable=False),
        sa.Column('pattern_id', sa.Integer, sa.ForeignKey('msg_json_pointer.id', ondelete='CASCADE'), nullable=False),
        sa.Column('cluster_id', sa.Integer, sa.ForeignKey('cluster.id', ondelete='CASCADE'), nullable=False),
        sa.UniqueConstraint('conn_id','pattern_id')
    )
    op.execute(CreateSequence(sa.Sequence('htp_sp_ad_rpl_p_xp_seq')))
    op.create_table(
        model.HTTSOAPAuditReplacePatternsXPath.__tablename__,
        sa.Column('id', sa.Integer(), sa.Sequence('htp_sp_ad_rpl_p_xp_seq'), primary_key=True),
        sa.Column('conn_id', sa.Integer(), sa.ForeignKey('http_soap.id', ondelete='CASCADE'), nullable=False),
        sa.Column('pattern_id', sa.Integer(), sa.ForeignKey('msg_xpath.id', ondelete='CASCADE'), nullable=False),
        sa.Column('cluster_id', sa.Integer(), sa.ForeignKey('cluster.id', ondelete='CASCADE'), nullable=False),
        sa.UniqueConstraint('conn_id','pattern_id')
    )

    # Pub/sub: topic first (FK target), then consumers and producers.
    op.execute(CreateSequence(sa.Sequence('pub_sub_topic_seq')))
    op.create_table(
        model.PubSubTopic.__tablename__,
        sa.Column('id', sa.Integer(), sa.Sequence('pub_sub_topic_seq'), primary_key=True),
        sa.Column('name', sa.String(200), nullable=False),
        sa.Column('is_active', sa.Boolean, nullable=False),
        sa.Column('max_depth', sa.Integer(), nullable=False),
        sa.Column('cluster_id', sa.Integer(), sa.ForeignKey('cluster.id', ondelete='CASCADE'), nullable=False),
        sa.UniqueConstraint('name','cluster_id')
    )
    op.execute(CreateSequence(sa.Sequence('pub_sub_cons_seq')))
    op.create_table(
        model.PubSubConsumer.__tablename__,
        sa.Column('id', sa.Integer(), sa.Sequence('pub_sub_cons_seq'), primary_key=True),
        sa.Column('is_active', sa.Boolean, nullable=False),
        sa.Column('sub_key', sa.String(200), nullable=False),
        sa.Column('max_backlog', sa.Integer(), nullable=False),
        sa.Column('delivery_mode', sa.String(200), nullable=False),
        sa.Column('callback_id', sa.Integer(), sa.ForeignKey('http_soap.id', ondelete='CASCADE'), nullable=True),
        sa.Column('callback_type', sa.String(20), nullable=True, default=PUB_SUB.CALLBACK_TYPE.OUTCONN_PLAIN_HTTP),
        sa.Column('topic_id', sa.Integer(), sa.ForeignKey('pub_sub_topic.id', ondelete='CASCADE'), nullable=False),
        sa.Column('sec_def_id', sa.Integer(), sa.ForeignKey('sec_base.id', ondelete='CASCADE'), nullable=False),
        sa.Column('cluster_id', sa.Integer(), sa.ForeignKey('cluster.id', ondelete='CASCADE'), nullable=False),
        sa.UniqueConstraint('sec_def_id','topic_id','cluster_id')
    )
    # NOTE(review): PubSubProducer reuses 'pub_sub_cons_seq' rather than its
    # own sequence - looks intentional (shared ID space) but worth confirming.
    op.create_table(
        model.PubSubProducer.__tablename__,
        sa.Column('id', sa.Integer(), sa.Sequence('pub_sub_cons_seq'), primary_key=True),
        sa.Column('is_active', sa.Boolean, nullable=False),
        sa.Column('topic_id', sa.Integer(), sa.ForeignKey('pub_sub_topic.id', ondelete='CASCADE'), nullable=False),
        sa.Column('sec_def_id', sa.Integer(), sa.ForeignKey('sec_base.id', ondelete='CASCADE'), nullable=False),
        sa.Column('cluster_id', sa.Integer(), sa.ForeignKey('cluster.id', ondelete='CASCADE'), nullable=False),
        sa.UniqueConstraint('sec_def_id','topic_id','cluster_id')
    )

    # Cloud connection definitions.
    op.execute(CreateSequence(sa.Sequence('os_swift_seq')))
    op.create_table(
        model.OpenStackSwift.__tablename__,
        sa.Column('id', sa.Integer(), sa.Sequence('os_swift_seq'), primary_key=True),
        sa.Column('name', sa.String(200), nullable=False),
        sa.Column('is_active', sa.Boolean, nullable=False),
        sa.Column('auth_url', sa.String(200), nullable=False),
        sa.Column('auth_version', sa.String(200), nullable=False, default=CLOUD.OPENSTACK.SWIFT.DEFAULTS.AUTH_VERSION),
        sa.Column('user', sa.String(200), nullable=True),
        sa.Column('key', sa.String(200), nullable=True),
        sa.Column('retries', sa.Integer, nullable=False, default=CLOUD.OPENSTACK.SWIFT.DEFAULTS.RETRIES),
        sa.Column('is_snet', sa.Boolean, nullable=False),
        sa.Column('starting_backoff', sa.Integer(), nullable=False, default=CLOUD.OPENSTACK.SWIFT.DEFAULTS.BACKOFF_STARTING),
        sa.Column('max_backoff', sa.Integer(), nullable=False, default=CLOUD.OPENSTACK.SWIFT.DEFAULTS.BACKOFF_MAX),
        sa.Column('tenant_name', sa.String(200), nullable=True),
        sa.Column('should_validate_cert', sa.Boolean, nullable=False),
        sa.Column('cacert', sa.String(200), nullable=True),
        sa.Column('should_retr_ratelimit', sa.Boolean, nullable=False),
        sa.Column('needs_tls_compr', sa.Boolean, nullable=False),
        sa.Column('custom_options', sa.String(2000), nullable=True),
        sa.Column('cluster_id', sa.Integer(), sa.ForeignKey('cluster.id', ondelete='CASCADE'), nullable=False),
        sa.UniqueConstraint('name','cluster_id')
    )
    op.execute(CreateSequence(sa.Sequence('aws_s3_seq')))
    op.create_table(
        model.AWSS3.__tablename__,
        sa.Column('id', sa.Integer, sa.Sequence('aws_s3_seq'), primary_key=True),
        sa.Column('name', sa.String(200), nullable=False),
        sa.Column('is_active', sa.Boolean, nullable=False),
        sa.Column('pool_size', sa.Integer(), nullable=False, default=CLOUD.AWS.S3.DEFAULTS.POOL_SIZE),
        sa.Column('address', sa.String(200), nullable=False, default=CLOUD.AWS.S3.DEFAULTS.ADDRESS),
        sa.Column('debug_level', sa.Integer, nullable=False, default=CLOUD.AWS.S3.DEFAULTS.DEBUG_LEVEL),
        sa.Column('suppr_cons_slashes', sa.Boolean, nullable=False, default=True),
        sa.Column('content_type', sa.String(200), nullable=False, default=CLOUD.AWS.S3.DEFAULTS.CONTENT_TYPE),
        sa.Column('metadata_', sa.String(2000), nullable=True),
        sa.Column('bucket', sa.String(2000), nullable=True),
        sa.Column('encrypt_at_rest', sa.Boolean, nullable=False, default=False),
        sa.Column('storage_class', sa.String(200), nullable=False, default=CLOUD.AWS.S3.STORAGE_CLASS.DEFAULT),
        sa.Column('security_id', sa.Integer(), sa.ForeignKey('sec_base.id', ondelete='CASCADE'), nullable=False),
        sa.Column('cluster_id', sa.Integer(), sa.ForeignKey('cluster.id', ondelete='CASCADE'), nullable=False),
        sa.UniqueConstraint('name','cluster_id')
    )

    # ODB connection details become optional on the cluster record.
    op.alter_column(model.Cluster.__tablename__, 'odb_host', nullable=True)
    op.alter_column(model.Cluster.__tablename__, 'odb_port', nullable=True)
    op.alter_column(model.Cluster.__tablename__, 'odb_user', nullable=True)
    op.alter_column(model.Cluster.__tablename__, 'odb_db_name', nullable=True)
def downgrade():
    """Revert the gh184 catch-up migration.

    Drops the tables, sequences, columns and constraints created by
    upgrade() and restores the NOT NULL constraints on the cluster ODB
    connection columns. Tables are dropped before the sequences they use.
    """
    op.drop_constraint('sec_base_cluster_id_username_sec_type_key', model.SecurityBase.__tablename__)

    # Security subtype tables.
    op.drop_table(model.NTLM.__tablename__)
    op.drop_table(model.AWSSecurity.__tablename__)
    op.drop_table(model.APIKeySecurity.__tablename__)
    op.drop_table(model.XPathSecurity.__tablename__)

    op.execute(DropSequence(sa.Sequence('deliv_payl_seq')))
    op.execute(DropSequence(sa.Sequence('msg_ns_seq')))

    # Audit tables and their sequences.
    op.drop_table(model.HTTSOAPAudit.__tablename__)
    op.execute(DropSequence(sa.Sequence('http_soap_audit_seq')))
    op.drop_table(model.HTTSOAPAuditReplacePatternsJSONPointer.__tablename__)
    op.execute(DropSequence(sa.Sequence('htp_sp_ad_rpl_p_jp_seq')))
    op.drop_table(model.HTTSOAPAuditReplacePatternsXPath.__tablename__)
    op.execute(DropSequence(sa.Sequence('htp_sp_ad_rpl_p_xp_seq')))

    # Pub/sub - consumers/producers first, topic last (FK dependency).
    op.drop_table(model.PubSubConsumer.__tablename__)
    op.execute(DropSequence(sa.Sequence('pub_sub_cons_seq')))
    op.drop_table(model.PubSubProducer.__tablename__)

    # Cloud connection tables.
    op.drop_table(model.OpenStackSwift.__tablename__)
    op.execute(DropSequence(sa.Sequence('os_swift_seq')))
    op.drop_table(model.AWSS3.__tablename__)
    op.execute(DropSequence(sa.Sequence('aws_s3_seq')))

    op.execute(DropSequence(sa.Sequence('deliv_seq')))
    op.drop_table(model.PubSubTopic.__tablename__)
    op.execute(DropSequence(sa.Sequence('pub_sub_topic_seq')))
    op.execute(DropSequence(sa.Sequence('deliv_def_seq')))

    op.drop_column(model.HTTPSOAP.__tablename__, 'serialization_type')
    op.drop_column(model.HTTPSOAP.__tablename__, 'timeout')

    # Restore NOT NULL on the cluster ODB connection columns.
    op.alter_column(model.Cluster.__tablename__, 'odb_host', nullable=False)
    op.alter_column(model.Cluster.__tablename__, 'odb_port', nullable=False)
    op.alter_column(model.Cluster.__tablename__, 'odb_user', nullable=False)
    op.alter_column(model.Cluster.__tablename__, 'odb_db_name', nullable=False)
|
kdwink/intellij-community | refs/heads/master | python/testData/completion/initParams.after.py | 83 | class C:
def __init__(self, auno=True): pass
c = C(auno=)
|
glorizen/nupic | refs/heads/master | tests/unit/nupic/encoders/delta_test.py | 28 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for delta encoder"""
import numpy as np
import tempfile
import unittest
from nupic.encoders.delta import (DeltaEncoder,
AdaptiveScalarEncoder)
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.encoders.delta_capnp import DeltaEncoderProto
class DeltaEncoderTest(unittest.TestCase):
  """Unit tests for DeltaEncoder class"""

  def setUp(self):
    # Fresh encoders per test: the delta encoder under test and a plain
    # adaptive scalar encoder used as a reference for expected encodings.
    self._dencoder = DeltaEncoder(w=21, n=100, forced=True)
    self._adaptscalar = AdaptiveScalarEncoder(w=21, n=100, forced=True)

  def testDeltaEncoder(self):
    """simple delta reconstruction test"""
    # Prime the encoder with 0..4 so it has previous values to delta against.
    for i in range(5):
      encarr = self._dencoder.encodeIntoArray(i, np.zeros(100), learn=True)
    self._dencoder.setStateLock(True)
    # NOTE(review): the return value of encodeIntoArray is captured and used
    # below; confirm DeltaEncoder.encodeIntoArray returns the encoding rather
    # than only filling the passed array in place -- TODO confirm
    for i in range(5, 7):
      encarr = self._dencoder.encodeIntoArray(i, np.zeros(100), learn=True)
    res = self._dencoder.topDownCompute(encarr)
    # Top-down decoding of the final encoding should reconstruct the value 6.
    self.assertEqual(res[0].value, 6)
    # topDownCompute must be deterministic - repeated calls agree with res.
    self.assertEqual(self._dencoder.topDownCompute(encarr)[0].value,
                     res[0].value)
    self.assertEqual(self._dencoder.topDownCompute(encarr)[0].scalar,
                     res[0].scalar)
    self.assertTrue(np.array_equal(
        self._dencoder.topDownCompute(encarr)[0].encoding,
        res[0].encoding))

  def testEncodingVerification(self):
    """encoding verification test passed"""
    feedIn = [1, 10, 4, 7, 9, 6, 3, 1]
    expectedOut = [0, 9, -6, 3, 2, -3, -3, -2]  # deltas between feedIn values
    self._dencoder.setStateLock(False)
    #Check that the deltas are being returned correctly.
    for i in range(len(feedIn)):
      aseencode = np.zeros(100)
      self._adaptscalar.encodeIntoArray(expectedOut[i], aseencode, learn=True)
      delencode = np.zeros(100)
      self._dencoder.encodeIntoArray(feedIn[i], delencode, learn=True)
      # NOTE(review): this compares only element 0 of each encoding; if a
      # whole-array comparison was intended the [0] should be dropped -- confirm
      self.assertTrue(np.array_equal(delencode[0], aseencode[0]))

  def testLockingState(self):
    """Check that locking the state works correctly"""
    feedIn = [1, 10, 9, 7, 9, 6, 3, 1]
    expectedOut = [0, 9, -6, 3, 2, -3, -3, -2]
    for i in range(len(feedIn)):
      if i == 3:
        # lock the delta encoder's previous-value state from here on
        self._dencoder.setStateLock(True)
      aseencode = np.zeros(100)
      self._adaptscalar.encodeIntoArray(expectedOut[i], aseencode, learn=True)
      delencode = np.zeros(100)
      # Before the lock the encoder is fed the expected deltas directly;
      # after it, deltas are computed relative to the locked value feedIn[2].
      if i>=3:
        self._dencoder.encodeIntoArray(feedIn[i]-feedIn[2], delencode,
                                       learn=True)
      else:
        self._dencoder.encodeIntoArray(expectedOut[i], delencode, learn=True)
      # NOTE(review): compares only element 0 of each encoding -- confirm
      self.assertTrue(np.array_equal(delencode[0], aseencode[0]))

  def testEncodeInvalidInputType(self):
    # Non-scalar input must be rejected with a TypeError.
    try:
      self._dencoder.encode("String")
    except TypeError as e:
      # NOTE(review): e.message is Python 2 only; this file predates Py3 support
      self.assertEqual(
          e.message,
          "Expected a scalar input but got input of type <type 'str'>")
    else:
      self.fail("Should have thrown TypeError during attempt to encode string "
                "with scalar encoder.")

  @unittest.skipUnless(
      capnp, "pycapnp is not installed, skipping serialization test.")
  def testReadWrite(self):
    # Round-trip the encoder through its Cap'n Proto serialization and check
    # that the deserialized encoder carries the same state and produces the
    # same encoding for the next input.
    feedIn = [1, 10, 4, 7, 9, 6, 3, 1]
    expectedOut = [0, 9, -6, 3, 2, -3, -3, -2]
    self._dencoder.setStateLock(False)
    outp = []
    #Check that the deltas are being returned correctly.
    for i in range(len(feedIn)-1):
      aseencode = np.zeros(100)
      self._adaptscalar.encodeIntoArray(expectedOut[i], aseencode, learn=True)
      delencode = np.zeros(100)
      self._dencoder.encodeIntoArray(feedIn[i], delencode, learn=True)
      outp.append(delencode)

    proto1 = DeltaEncoderProto.new_message()
    self._dencoder.write(proto1)

    # Write the proto to a temp file and read it back into a new proto
    with tempfile.TemporaryFile() as f:
      proto1.write(f)
      f.seek(0)
      proto2 = DeltaEncoderProto.read(f)

    encoder = DeltaEncoder.read(proto2)

    self.assertIsInstance(encoder, DeltaEncoder)
    self.assertEqual(encoder.width, self._dencoder.width)
    self.assertEqual(encoder.n, self._dencoder.n)
    self.assertEqual(encoder.name, self._dencoder.name)
    self.assertEqual(encoder._prevAbsolute, self._dencoder._prevAbsolute)
    self.assertEqual(encoder._prevDelta, self._dencoder._prevDelta)
    self.assertEqual(encoder._stateLock, self._dencoder._stateLock)

    # Both encoders must encode the remaining input identically.
    delencode = np.zeros(100)
    self._dencoder.encodeIntoArray(feedIn[-1], delencode, learn=True)

    delencode2 = np.zeros(100)
    encoder.encodeIntoArray(feedIn[-1], delencode2, learn=True)

    self.assertTrue(np.array_equal(delencode, delencode2))
if __name__ == "__main__":
unittest.main()
|
CINPLA/python-neo | refs/heads/master | neo/test/coretest/test_analogsignal.py | 1 | # -*- coding: utf-8 -*-
"""
Tests of the neo.core.analogsignal.AnalogSignal class and related functions
"""
# needed for python 3 compatibility
from __future__ import division
import os
import pickle
try:
import unittest2 as unittest
except ImportError:
import unittest
import numpy as np
import quantities as pq
try:
from IPython.lib.pretty import pretty
except ImportError as err:
HAVE_IPYTHON = False
else:
HAVE_IPYTHON = True
from numpy.testing import assert_array_equal
from neo.core.analogsignal import AnalogSignal, _get_sampling_rate
from neo.core.channelindex import ChannelIndex
from neo.core import Segment
from neo.test.tools import (assert_arrays_almost_equal,
assert_neo_object_is_compliant,
assert_same_sub_schema)
from neo.test.generate_datasets import (get_fake_value, get_fake_values,
fake_neo, TEST_ANNOTATIONS)
class Test__generate_datasets(unittest.TestCase):
    """Checks fake_neo produces compliant AnalogSignal objects."""

    def setUp(self):
        np.random.seed(0)
        self.annotations = dict((str(i), TEST_ANNOTATIONS[i])
                                for i in range(len(TEST_ANNOTATIONS)))

    def _check_fake_signal(self, obj_type, cascade):
        # fake_neo should build a compliant AnalogSignal carrying our annotations
        self.annotations['seed'] = None
        res = fake_neo(obj_type=obj_type, cascade=cascade)

        self.assertTrue(isinstance(res, AnalogSignal))
        assert_neo_object_is_compliant(res)
        self.assertEqual(res.annotations, self.annotations)

    def test__fake_neo__cascade(self):
        # obj_type given as the class itself
        self._check_fake_signal(AnalogSignal, True)

    def test__fake_neo__nocascade(self):
        # obj_type given as a string name
        self._check_fake_signal('AnalogSignal', False)
class TestAnalogSignalConstructor(unittest.TestCase):
    """Tests AnalogSignal construction from lists, arrays and quantities,
    including unit handling, invalid-argument errors and copy semantics."""

    def test__create_from_list(self):
        data = range(10)
        rate = 1000*pq.Hz
        signal = AnalogSignal(data, sampling_rate=rate, units="mV")
        assert_neo_object_is_compliant(signal)
        self.assertEqual(signal.t_start, 0*pq.ms)
        self.assertEqual(signal.t_stop, len(data)/rate)
        # 9 mV expressed in microvolts
        self.assertEqual(signal[9, 0], 9000*pq.uV)

    def test__create_from_np_array(self):
        data = np.arange(10.0)
        rate = 1*pq.kHz
        signal = AnalogSignal(data, sampling_rate=rate, units="uV")
        assert_neo_object_is_compliant(signal)
        self.assertEqual(signal.t_start, 0*pq.ms)
        self.assertEqual(signal.t_stop, data.size/rate)
        self.assertEqual(signal[9, 0], 0.009*pq.mV)

    def test__create_from_quantities_array(self):
        # units are taken from the quantities array itself
        data = np.arange(10.0) * pq.mV
        rate = 5000*pq.Hz
        signal = AnalogSignal(data, sampling_rate=rate)
        assert_neo_object_is_compliant(signal)
        self.assertEqual(signal.t_start, 0*pq.ms)
        self.assertEqual(signal.t_stop, data.size/rate)
        self.assertEqual(signal[9, 0], 0.009*pq.V)

    def test__create_from_array_no_units_ValueError(self):
        # a plain array without explicit units must be rejected
        data = np.arange(10.0)
        self.assertRaises(ValueError, AnalogSignal, data,
                          sampling_rate=1 * pq.kHz)

    def test__create_from_quantities_array_inconsistent_units_ValueError(self):
        # mV data with nA units requested - incompatible dimensions
        data = np.arange(10.0) * pq.mV
        self.assertRaises(ValueError, AnalogSignal, data,
                          sampling_rate=1 * pq.kHz, units="nA")

    def test__create_without_sampling_rate_or_period_ValueError(self):
        data = np.arange(10.0) * pq.mV
        self.assertRaises(ValueError, AnalogSignal, data)

    def test__create_with_None_sampling_rate_should_raise_ValueError(self):
        data = np.arange(10.0) * pq.mV
        self.assertRaises(ValueError, AnalogSignal, data, sampling_rate=None)

    def test__create_with_None_t_start_should_raise_ValueError(self):
        data = np.arange(10.0) * pq.mV
        rate = 5000 * pq.Hz
        self.assertRaises(ValueError, AnalogSignal, data,
                          sampling_rate=rate, t_start=None)

    def test__create_inconsistent_sampling_rate_and_period_ValueError(self):
        # 1 kHz and 5 s period contradict each other
        data = np.arange(10.0) * pq.mV
        self.assertRaises(ValueError, AnalogSignal, data,
                          sampling_rate=1 * pq.kHz, sampling_period=5 * pq.s)

    def test__create_with_copy_true_should_return_copy(self):
        # mutating the source after construction must not affect the signal
        data = np.arange(10.0) * pq.mV
        rate = 5000*pq.Hz
        signal = AnalogSignal(data, copy=True, sampling_rate=rate)
        data[3] = 99*pq.mV
        assert_neo_object_is_compliant(signal)
        self.assertNotEqual(signal[3, 0], 99*pq.mV)

    def test__create_with_copy_false_should_return_view(self):
        # with copy=False the signal shares memory with the source array
        data = np.arange(10.0) * pq.mV
        rate = 5000*pq.Hz
        signal = AnalogSignal(data, copy=False, sampling_rate=rate)
        data[3] = 99*pq.mV
        assert_neo_object_is_compliant(signal)
        self.assertEqual(signal[3, 0], 99*pq.mV)

    def test__create2D_with_copy_false_should_return_view(self):
        data = np.arange(10.0) * pq.mV
        data = data.reshape((5, 2))
        rate = 5000*pq.Hz
        signal = AnalogSignal(data, copy=False, sampling_rate=rate)
        data[3, 0] = 99*pq.mV
        assert_neo_object_is_compliant(signal)
        self.assertEqual(signal[3, 0], 99*pq.mV)

    def test__create_with_additional_argument(self):
        # unrecognized keyword arguments become annotations
        signal = AnalogSignal([1, 2, 3], units="mV", sampling_rate=1*pq.kHz,
                              file_origin='crack.txt', ratname='Nicolas')
        assert_neo_object_is_compliant(signal)
        self.assertEqual(signal.annotations, {'ratname': 'Nicolas'})

        # This one is universally recommended and handled by BaseNeo
        self.assertEqual(signal.file_origin, 'crack.txt')

    # signal must be 1D - should raise Exception if not 1D
class TestAnalogSignalProperties(unittest.TestCase):
def setUp(self):
    """Build three signals with differing rates, t_starts and units."""
    self.t_start = [0.0*pq.ms, 100*pq.ms, -200*pq.ms]
    self.rates = [1*pq.kHz, 420*pq.Hz, 999*pq.Hz]
    # alternative rates used by the setter tests
    self.rates2 = [2*pq.kHz, 290*pq.Hz, 1111*pq.Hz]
    self.data = [np.arange(10.0)*pq.nA,
                 np.arange(-100.0, 100.0, 10.0)*pq.mV,
                 np.random.uniform(size=100)*pq.uV]
    self.signals = [AnalogSignal(D, sampling_rate=r, t_start=t,
                                 testattr='test')
                    for r, D, t in zip(self.rates,
                                       self.data,
                                       self.t_start)]
def test__compliant(self):
    """All fixture signals must satisfy the neo compliance checks."""
    for signal in self.signals:
        assert_neo_object_is_compliant(signal)
def test__t_stop_getter(self):
    """t_stop must equal t_start plus the signal's duration."""
    for i, signal in enumerate(self.signals):
        self.assertEqual(signal.t_stop,
                         self.t_start[i] + self.data[i].size/self.rates[i])
def test__duration_getter(self):
    """duration must equal t_stop - t_start (up to float rounding)."""
    for signal in self.signals:
        self.assertAlmostEqual(signal.duration,
                               signal.t_stop - signal.t_start,
                               delta=1e-15)
def test__sampling_rate_getter(self):
    """sampling_rate must reflect the rate given at construction."""
    for signal, rate in zip(self.signals, self.rates):
        self.assertEqual(signal.sampling_rate, rate)
def test__sampling_period_getter(self):
    """sampling_period must be the reciprocal of the sampling rate."""
    for signal, rate in zip(self.signals, self.rates):
        self.assertEqual(signal.sampling_period, 1 / rate)
def test__sampling_rate_setter(self):
    """Setting sampling_rate must also update the derived sampling_period."""
    for signal, rate in zip(self.signals, self.rates2):
        signal.sampling_rate = rate
        assert_neo_object_is_compliant(signal)
        self.assertEqual(signal.sampling_rate, rate)
        self.assertEqual(signal.sampling_period, 1 / rate)
def test__sampling_period_setter(self):
for signal, rate in zip(self.signals, self.rates2):
signal.sampling_period = 1 / rate
assert_neo_object_is_compliant(signal)
self.assertEqual(signal.sampling_rate, rate)
self.assertEqual(signal.sampling_period, 1 / rate)
def test__sampling_rate_setter_None_ValueError(self):
self.assertRaises(ValueError, setattr, self.signals[0],
'sampling_rate', None)
def test__sampling_rate_setter_not_quantity_ValueError(self):
self.assertRaises(ValueError, setattr, self.signals[0],
'sampling_rate', 5.5)
def test__sampling_period_setter_None_ValueError(self):
signal = self.signals[0]
assert_neo_object_is_compliant(signal)
self.assertRaises(ValueError, setattr, signal, 'sampling_period', None)
def test__sampling_period_setter_not_quantity_ValueError(self):
self.assertRaises(ValueError, setattr, self.signals[0],
'sampling_period', 5.5)
def test__t_start_setter_None_ValueError(self):
signal = self.signals[0]
assert_neo_object_is_compliant(signal)
self.assertRaises(ValueError, setattr, signal, 't_start', None)
def test__times_getter(self):
for i, signal in enumerate(self.signals):
targ = np.arange(self.data[i].size)
targ = targ/self.rates[i] + self.t_start[i]
assert_neo_object_is_compliant(signal)
assert_arrays_almost_equal(signal.times, targ, 1e-12*pq.ms)
def test__duplicate_with_new_array(self):
signal1 = self.signals[1]
signal2 = self.signals[2]
data2 = self.data[2]
signal1b = signal1.duplicate_with_new_array(data2)
assert_arrays_almost_equal(np.asarray(signal1b),
np.asarray(signal2/1000.), 1e-12)
self.assertEqual(signal1b.t_start, signal1.t_start)
self.assertEqual(signal1b.sampling_rate, signal1.sampling_rate)
# def test__children(self):
# signal = self.signals[0]
#
# segment = Segment(name='seg1')
# segment.analogsignals = [signal]
# segment.create_many_to_one_relationship()
#
# rchan = RecordingChannel(name='rchan1')
# rchan.analogsignals = [signal]
# rchan.create_many_to_one_relationship()
#
# self.assertEqual(signal._single_parent_objects,
# ('Segment', 'RecordingChannel'))
# self.assertEqual(signal._multi_parent_objects, ())
#
# self.assertEqual(signal._single_parent_containers,
# ('segment', 'recordingchannel'))
# self.assertEqual(signal._multi_parent_containers, ())
#
# self.assertEqual(signal._parent_objects,
# ('Segment', 'RecordingChannel'))
# self.assertEqual(signal._parent_containers,
# ('segment', 'recordingchannel'))
#
# self.assertEqual(len(signal.parents), 2)
# self.assertEqual(signal.parents[0].name, 'seg1')
# self.assertEqual(signal.parents[1].name, 'rchan1')
#
# assert_neo_object_is_compliant(signal)
def test__repr(self):
for i, signal in enumerate(self.signals):
prepr = repr(signal)
targ = '<AnalogSignal(%s, [%s, %s], sampling rate: %s)>' % \
(repr(self.data[i].reshape(-1, 1)),
self.t_start[i],
self.t_start[i] + len(self.data[i])/self.rates[i],
self.rates[i])
self.assertEqual(prepr, targ)
@unittest.skipUnless(HAVE_IPYTHON, "requires IPython")
def test__pretty(self):
for i, signal in enumerate(self.signals):
prepr = pretty(signal)
targ = (('AnalogSignal with %d channels of length %d; units %s; datatype %s \n' %
(signal.shape[1], signal.shape[0], signal.units.dimensionality.unicode, signal.dtype)) +
('annotations: %s\n' % signal.annotations) +
('sampling rate: %s\n' % signal.sampling_rate) +
('time: %s to %s' % (signal.t_start, signal.t_stop)))
self.assertEqual(prepr, targ)
class TestAnalogSignalArrayMethods(unittest.TestCase):
    """Tests for array-like behaviour of AnalogSignal: slicing, indexing,
    comparisons, statistics, rescaling and conversion to plain arrays.

    All tests work on a single 10-sample nA signal built in setUp.
    """

    def setUp(self):
        self.data1 = np.arange(10.0)
        self.data1quant = self.data1 * pq.nA
        self.signal1 = AnalogSignal(self.data1quant, sampling_rate=1*pq.kHz,
                                    name='spam', description='eggs',
                                    file_origin='testfile.txt', arg1='test')
        # Attach parent links so the slice/copy tests can check that
        # they are propagated to derived signals.
        self.signal1.segment = 1
        self.signal1.channel_index = ChannelIndex(index=[0])

    def test__compliant(self):
        assert_neo_object_is_compliant(self.signal1)

    def test__slice_should_return_AnalogSignalArray(self):
        # slice
        result = self.signal1[3:8, 0]
        self.assertIsInstance(result, AnalogSignal)
        assert_neo_object_is_compliant(result)
        self.assertEqual(result.name, 'spam')  # should slicing really preserve name and description?
        self.assertEqual(result.description, 'eggs')  # perhaps these should be modified to indicate the slice?
        self.assertEqual(result.file_origin, 'testfile.txt')
        self.assertEqual(result.annotations, {'arg1': 'test'})

        self.assertEqual(result.size, 5)
        self.assertEqual(result.sampling_period, self.signal1.sampling_period)
        self.assertEqual(result.sampling_rate, self.signal1.sampling_rate)
        # A slice starting at sample 3 shifts t_start by 3 periods.
        self.assertEqual(result.t_start,
                         self.signal1.t_start+3*result.sampling_period)
        self.assertEqual(result.t_stop,
                         result.t_start + 5*result.sampling_period)
        assert_array_equal(result.magnitude, self.data1[3:8].reshape(-1, 1))

        # Test other attributes were copied over (in this case, defaults)
        self.assertEqual(result.file_origin, self.signal1.file_origin)
        self.assertEqual(result.name, self.signal1.name)
        self.assertEqual(result.description, self.signal1.description)
        self.assertEqual(result.annotations, self.signal1.annotations)

    def test__slice_should_let_access_to_parents_objects(self):
        result = self.signal1.time_slice(1*pq.ms, 3*pq.ms)
        self.assertEqual(result.segment, self.signal1.segment)
        self.assertEqual(result.channel_index, self.signal1.channel_index)

    def test__slice_should_change_sampling_period(self):
        # A step > 1 in the slice must multiply the sampling period.
        result1 = self.signal1[:2, 0]
        result2 = self.signal1[::2, 0]
        result3 = self.signal1[1:7:2, 0]

        self.assertIsInstance(result1, AnalogSignal)
        assert_neo_object_is_compliant(result1)
        self.assertEqual(result1.name, 'spam')
        self.assertEqual(result1.description, 'eggs')
        self.assertEqual(result1.file_origin, 'testfile.txt')
        self.assertEqual(result1.annotations, {'arg1': 'test'})

        self.assertIsInstance(result2, AnalogSignal)
        assert_neo_object_is_compliant(result2)
        self.assertEqual(result2.name, 'spam')
        self.assertEqual(result2.description, 'eggs')
        self.assertEqual(result2.file_origin, 'testfile.txt')
        self.assertEqual(result2.annotations, {'arg1': 'test'})

        self.assertIsInstance(result3, AnalogSignal)
        assert_neo_object_is_compliant(result3)
        self.assertEqual(result3.name, 'spam')
        self.assertEqual(result3.description, 'eggs')
        self.assertEqual(result3.file_origin, 'testfile.txt')
        self.assertEqual(result3.annotations, {'arg1': 'test'})

        self.assertEqual(result1.sampling_period, self.signal1.sampling_period)
        self.assertEqual(result2.sampling_period,
                         self.signal1.sampling_period * 2)
        self.assertEqual(result3.sampling_period,
                         self.signal1.sampling_period * 2)

        assert_array_equal(result1.magnitude, self.data1[:2].reshape(-1, 1))
        assert_array_equal(result2.magnitude, self.data1[::2].reshape(-1, 1))
        assert_array_equal(result3.magnitude, self.data1[1:7:2].reshape(-1, 1))

    def test__slice_should_modify_linked_channelindex(self):
        n = 8  # number of channels
        signal = AnalogSignal(np.arange(n * 100.0).reshape(100, n),
                              sampling_rate=1*pq.kHz,
                              units="mV")
        self.assertEqual(signal.shape, (100, n))
        signal.channel_index = ChannelIndex(index=np.arange(n, dtype=int),
                                            channel_names=["channel{0}".format(i) for i in range(n)])
        # Selecting every other channel should narrow the ChannelIndex on
        # the slice while leaving the original signal's index untouched.
        odd_channels = signal[:, 1::2]
        self.assertEqual(odd_channels.shape, (100, n//2))
        assert_array_equal(odd_channels.channel_index.index, np.arange(n//2, dtype=int))
        assert_array_equal(odd_channels.channel_index.channel_names, ["channel{0}".format(i) for i in range(1, n, 2)])
        assert_array_equal(signal.channel_index.channel_names, ["channel{0}".format(i) for i in range(n)])

    def test__copy_should_let_access_to_parents_objects(self):
        ##copy
        result = self.signal1.copy()
        self.assertEqual(result.segment, self.signal1.segment)
        self.assertEqual(result.channel_index, self.signal1.channel_index)
        ## deep copy (not fixed yet)
        #result = copy.deepcopy(self.signal1)
        #self.assertEqual(result.segment, self.signal1.segment)
        #self.assertEqual(result.channel_index, self.signal1.channel_index)

    def test__getitem_should_return_single_quantity(self):
        # Scalar indexing drops all neo metadata and yields a bare Quantity.
        result1 = self.signal1[0, 0]
        result2 = self.signal1[9, 0]

        self.assertIsInstance(result1, pq.Quantity)
        self.assertFalse(hasattr(result1, 'name'))
        self.assertFalse(hasattr(result1, 'description'))
        self.assertFalse(hasattr(result1, 'file_origin'))
        self.assertFalse(hasattr(result1, 'annotations'))

        self.assertIsInstance(result2, pq.Quantity)
        self.assertFalse(hasattr(result2, 'name'))
        self.assertFalse(hasattr(result2, 'description'))
        self.assertFalse(hasattr(result2, 'file_origin'))
        self.assertFalse(hasattr(result2, 'annotations'))

        self.assertEqual(result1, 0*pq.nA)
        self.assertEqual(result2, 9*pq.nA)

    def test__getitem_out_of_bounds_IndexError(self):
        self.assertRaises(IndexError, self.signal1.__getitem__, (10, 0))

    def test_comparison_operators(self):
        assert_array_equal(self.signal1 >= 5*pq.nA,
                           np.array([False, False, False, False, False,
                                     True, True, True, True, True]).reshape(-1, 1))
        # Comparison against a compatible unit (pA) rescales first.
        assert_array_equal(self.signal1 >= 5*pq.pA,
                           np.array([False, True, True, True, True,
                                     True, True, True, True, True]).reshape(-1, 1))

    def test__comparison_with_inconsistent_units_should_raise_Exception(self):
        self.assertRaises(ValueError, self.signal1.__gt__, 5*pq.mV)

    def test__simple_statistics(self):
        self.assertEqual(self.signal1.max(), 9*pq.nA)
        self.assertEqual(self.signal1.min(), 0*pq.nA)
        self.assertEqual(self.signal1.mean(), 4.5*pq.nA)

    def test__rescale_same(self):
        # Rescaling to the signal's own units must be a no-op copy.
        result = self.signal1.copy()
        result = result.rescale(pq.nA)

        self.assertIsInstance(result, AnalogSignal)
        assert_neo_object_is_compliant(result)
        self.assertEqual(result.name, 'spam')
        self.assertEqual(result.description, 'eggs')
        self.assertEqual(result.file_origin, 'testfile.txt')
        self.assertEqual(result.annotations, {'arg1': 'test'})

        self.assertEqual(result.units, 1*pq.nA)
        assert_array_equal(result.magnitude, self.data1.reshape(-1, 1))
        assert_same_sub_schema(result, self.signal1)

    def test__rescale_new(self):
        result = self.signal1.copy()
        result = result.rescale(pq.pA)

        self.assertIsInstance(result, AnalogSignal)
        assert_neo_object_is_compliant(result)
        self.assertEqual(result.name, 'spam')
        self.assertEqual(result.description, 'eggs')
        self.assertEqual(result.file_origin, 'testfile.txt')
        self.assertEqual(result.annotations, {'arg1': 'test'})

        self.assertEqual(result.units, 1*pq.pA)
        # nA -> pA scales the magnitude by 1000.
        assert_arrays_almost_equal(np.array(result), self.data1.reshape(-1, 1)*1000., 1e-10)

    def test__rescale_new_incompatible_ValueError(self):
        self.assertRaises(ValueError, self.signal1.rescale, pq.mV)

    def test_as_array(self):
        sig_as_arr = self.signal1.as_array()
        self.assertIsInstance(sig_as_arr, np.ndarray)
        assert_array_equal(self.data1, sig_as_arr.flat)

    def test_as_quantity(self):
        sig_as_q = self.signal1.as_quantity()
        self.assertIsInstance(sig_as_q, pq.Quantity)
        assert_array_equal(self.data1, sig_as_q.magnitude.flat)
class TestAnalogSignalEquality(unittest.TestCase):
    """Equality semantics of AnalogSignal objects."""

    def test__signals_with_different_data_complement_should_be_not_equal(self):
        """Identical samples but different sampling rates compare unequal."""
        sig_a = AnalogSignal(np.arange(10.0), units="mV",
                             sampling_rate=1*pq.kHz)
        sig_b = AnalogSignal(np.arange(10.0), units="mV",
                             sampling_rate=2*pq.kHz)
        for sig in (sig_a, sig_b):
            assert_neo_object_is_compliant(sig)
        self.assertNotEqual(sig_a, sig_b)
class TestAnalogSignalCombination(unittest.TestCase):
    """Arithmetic between AnalogSignals and scalars/arrays/other signals.

    Every test checks that the result keeps the "data complement"
    (name, description, file_origin, annotations, t_start, sampling rate)
    of the original signal.
    """

    def setUp(self):
        self.data1 = np.arange(10.0)
        self.data1quant = self.data1 * pq.mV
        self.signal1 = AnalogSignal(self.data1quant,
                                    sampling_rate=1*pq.kHz,
                                    name='spam', description='eggs',
                                    file_origin='testfile.txt',
                                    arg1='test')

    def test__compliant(self):
        assert_neo_object_is_compliant(self.signal1)
        self.assertEqual(self.signal1.name, 'spam')
        self.assertEqual(self.signal1.description, 'eggs')
        self.assertEqual(self.signal1.file_origin, 'testfile.txt')
        self.assertEqual(self.signal1.annotations, {'arg1': 'test'})

    def test__add_const_quantity_should_preserve_data_complement(self):
        # 0.065 V is rescaled to 65 mV before the addition.
        result = self.signal1 + 0.065*pq.V
        self.assertIsInstance(result, AnalogSignal)
        assert_neo_object_is_compliant(result)
        self.assertEqual(result.name, 'spam')
        self.assertEqual(result.description, 'eggs')
        self.assertEqual(result.file_origin, 'testfile.txt')
        self.assertEqual(result.annotations, {'arg1': 'test'})

        assert_array_equal(result.magnitude.flatten(), self.data1 + 65)
        self.assertEqual(self.signal1[9, 0], 9*pq.mV)
        self.assertEqual(result[9, 0], 74*pq.mV)
        self.assertEqual(self.signal1.t_start, result.t_start)
        self.assertEqual(self.signal1.sampling_rate, result.sampling_rate)

    def test__add_quantity_should_preserve_data_complement(self):
        data2 = np.arange(10.0, 20.0).reshape(-1, 1)
        data2quant = data2*pq.mV

        result = self.signal1 + data2quant
        self.assertIsInstance(result, AnalogSignal)
        assert_neo_object_is_compliant(result)
        self.assertEqual(result.name, 'spam')
        self.assertEqual(result.description, 'eggs')
        self.assertEqual(result.file_origin, 'testfile.txt')
        self.assertEqual(result.annotations, {'arg1': 'test'})

        targ = AnalogSignal(np.arange(10.0, 30.0, 2.0), units="mV",
                            sampling_rate=1*pq.kHz,
                            name='spam', description='eggs',
                            file_origin='testfile.txt', arg1='test')
        assert_neo_object_is_compliant(targ)

        assert_array_equal(result, targ)
        assert_same_sub_schema(result, targ)

    def test__add_two_consistent_signals_should_preserve_data_complement(self):
        data2 = np.arange(10.0, 20.0)
        data2quant = data2*pq.mV
        signal2 = AnalogSignal(data2quant, sampling_rate=1*pq.kHz)
        assert_neo_object_is_compliant(signal2)

        result = self.signal1 + signal2
        self.assertIsInstance(result, AnalogSignal)
        assert_neo_object_is_compliant(result)
        self.assertEqual(result.name, 'spam')
        self.assertEqual(result.description, 'eggs')
        self.assertEqual(result.file_origin, 'testfile.txt')
        self.assertEqual(result.annotations, {'arg1': 'test'})

        targ = AnalogSignal(np.arange(10.0, 30.0, 2.0), units="mV",
                            sampling_rate=1*pq.kHz,
                            name='spam', description='eggs',
                            file_origin='testfile.txt', arg1='test')
        assert_neo_object_is_compliant(targ)

        assert_array_equal(result, targ)
        assert_same_sub_schema(result, targ)

    def test__add_signals_with_inconsistent_data_complement_ValueError(self):
        # Different t_start and sampling rate make the signals incompatible.
        self.signal1.t_start = 0.0*pq.ms
        assert_neo_object_is_compliant(self.signal1)

        signal2 = AnalogSignal(np.arange(10.0), units="mV",
                               t_start=100.0*pq.ms, sampling_rate=0.5*pq.kHz)
        assert_neo_object_is_compliant(signal2)

        self.assertRaises(ValueError, self.signal1.__add__, signal2)

    def test__subtract_const_should_preserve_data_complement(self):
        result = self.signal1 - 65*pq.mV
        self.assertIsInstance(result, AnalogSignal)
        assert_neo_object_is_compliant(result)
        self.assertEqual(result.name, 'spam')
        self.assertEqual(result.description, 'eggs')
        self.assertEqual(result.file_origin, 'testfile.txt')
        self.assertEqual(result.annotations, {'arg1': 'test'})

        self.assertEqual(self.signal1[9, 0], 9*pq.mV)
        self.assertEqual(result[9, 0], -56*pq.mV)
        assert_array_equal(result.magnitude.flatten(), self.data1 - 65)
        self.assertEqual(self.signal1.sampling_rate, result.sampling_rate)

    def test__subtract_from_const_should_return_signal(self):
        # Reflected subtraction (scalar - signal) must still return a signal.
        result = 10*pq.mV - self.signal1
        self.assertIsInstance(result, AnalogSignal)
        assert_neo_object_is_compliant(result)
        self.assertEqual(result.name, 'spam')
        self.assertEqual(result.description, 'eggs')
        self.assertEqual(result.file_origin, 'testfile.txt')
        self.assertEqual(result.annotations, {'arg1': 'test'})

        self.assertEqual(self.signal1[9, 0], 9*pq.mV)
        self.assertEqual(result[9, 0], 1*pq.mV)
        assert_array_equal(result.magnitude.flatten(), 10 - self.data1)
        self.assertEqual(self.signal1.sampling_rate, result.sampling_rate)

    def test__mult_by_const_float_should_preserve_data_complement(self):
        result = self.signal1*2
        self.assertIsInstance(result, AnalogSignal)
        assert_neo_object_is_compliant(result)
        self.assertEqual(result.name, 'spam')
        self.assertEqual(result.description, 'eggs')
        self.assertEqual(result.file_origin, 'testfile.txt')
        self.assertEqual(result.annotations, {'arg1': 'test'})

        self.assertEqual(self.signal1[9, 0], 9*pq.mV)
        self.assertEqual(result[9, 0], 18*pq.mV)
        assert_array_equal(result.magnitude.flatten(), self.data1*2)
        self.assertEqual(self.signal1.sampling_rate, result.sampling_rate)

    def test__divide_by_const_should_preserve_data_complement(self):
        result = self.signal1/0.5
        self.assertIsInstance(result, AnalogSignal)
        assert_neo_object_is_compliant(result)
        self.assertEqual(result.name, 'spam')
        self.assertEqual(result.description, 'eggs')
        self.assertEqual(result.file_origin, 'testfile.txt')
        self.assertEqual(result.annotations, {'arg1': 'test'})

        self.assertEqual(self.signal1[9, 0], 9*pq.mV)
        self.assertEqual(result[9, 0], 18*pq.mV)
        assert_array_equal(result.magnitude.flatten(), self.data1/0.5)
        self.assertEqual(self.signal1.sampling_rate, result.sampling_rate)
class TestAnalogSignalFunctions(unittest.TestCase):
    """Module-level behaviour of AnalogSignal, currently only pickling."""

    def test__pickle(self):
        """An AnalogSignal must survive a pickle round-trip.

        The original code left file handles open when pickle raised and
        leaked the './pickle' scratch file when an assertion failed; the
        context managers and the finally block fix both.
        """
        signal1 = AnalogSignal([1, 2, 3, 4], sampling_period=1*pq.ms,
                               units=pq.S)
        signal1.annotations['index'] = 2

        try:
            with open('./pickle', 'wb') as fobj:
                pickle.dump(signal1, fobj)

            with open('./pickle', 'rb') as fobj:
                try:
                    signal2 = pickle.load(fobj)
                except ValueError:
                    # Kept from the original: a failed load is reported as
                    # an array mismatch by the assertion below.
                    signal2 = None

            assert_array_equal(signal1, signal2)
        finally:
            # Always remove the scratch file, even on assertion failure.
            if os.path.exists('./pickle'):
                os.remove('./pickle')
class TestAnalogSignalSampling(unittest.TestCase):
    """Exercise _get_sampling_rate for every rate/period input combination."""

    def test___get_sampling_rate__period_none_rate_none_ValueError(self):
        # Neither value supplied: impossible to derive a rate.
        with self.assertRaises(ValueError):
            _get_sampling_rate(None, None)

    def test___get_sampling_rate__period_quant_rate_none(self):
        period = pq.Quantity(10., units=pq.s)
        self.assertEqual(1 / period, _get_sampling_rate(None, period))

    def test___get_sampling_rate__period_none_rate_quant(self):
        rate = pq.Quantity(10., units=pq.Hz)
        self.assertEqual(rate, _get_sampling_rate(rate, None))

    def test___get_sampling_rate__period_rate_equivalent(self):
        # Both supplied and mutually consistent: the rate wins.
        rate = pq.Quantity(10., units=pq.Hz)
        period = pq.Quantity(0.1, units=pq.s)
        self.assertEqual(rate, _get_sampling_rate(rate, period))

    def test___get_sampling_rate__period_rate_not_equivalent_ValueError(self):
        # Both supplied but contradictory: must refuse to guess.
        with self.assertRaises(ValueError):
            _get_sampling_rate(pq.Quantity(10., units=pq.Hz),
                               pq.Quantity(10, units=pq.s))

    def test___get_sampling_rate__period_none_rate_float_TypeError(self):
        with self.assertRaises(TypeError):
            _get_sampling_rate(10., None)

    def test___get_sampling_rate__period_array_rate_none_TypeError(self):
        with self.assertRaises(TypeError):
            _get_sampling_rate(None, np.array(10.))
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
|
Proggie02/TestRepo | refs/heads/master | django/conf/locale/de/formats.py | 107 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Display formats (Django date-format syntax).
DATE_FORMAT = 'j. F Y'                    # e.g. "25. Oktober 2006"
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j. F Y H:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'               # e.g. "25.10.2006"
SHORT_DATETIME_FORMAT = 'd.m.Y H:i:s'
FIRST_DAY_OF_WEEK = 1  # Monday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
    '%d.%m.%Y', '%d.%m.%y',  # '25.10.2006', '25.10.06'
    # Month-name forms are disabled: %B/%b only match English month names.
    # '%d. %B %Y', '%d. %b. %Y',  # '25. October 2006', '25. Oct. 2006'
)
DATETIME_INPUT_FORMATS = (
    '%d.%m.%Y %H:%M:%S',  # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M',     # '25.10.2006 14:30'
    '%d.%m.%Y',           # '25.10.2006'
)
# German number formatting: "1.234.567,89"
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
|
pakra1/inf1340_2015_asst1 | refs/heads/master | exercise3.py | 6 | #!/usr/bin/env python
""" Assignment 1, Exercise 3, INF1340, Fall, 2015. Troubleshooting Car Issues.
This module contains one function diagnose_car(). It is an expert system to
interactive diagnose car issues.
"""
__author__ = 'Susan Sim'
__email__ = "ses@drsusansim.org"
__copyright__ = "2015 Susan Sim"
__license__ = "MIT License"
def diagnose_car():
    """
    Interactively queries the user with yes/no questions to identify a
    possible issue with a car.

    Inputs: none yet (answers are meant to be read interactively).
    Expected Outputs: prints a diagnosis message to standard output;
        returns None.
    Errors: none.
    """
    # NOTE(review): only the final diagnosis of one branch is implemented;
    # the interactive yes/no question tree still needs to be written.
    print("The battery cables may be damaged. Replace cables and try again.")


# Guard the call so importing this module (e.g. from a test) does not
# immediately run the interactive diagnosis.
if __name__ == "__main__":
    diagnose_car()
mhils/mitmproxy | refs/heads/master | examples/addons/events-websocket-specific.py | 1 | """WebSocket-specific events."""
import mitmproxy.http
import mitmproxy.websocket
class Events:
    """Skeleton addon enumerating mitmproxy's WebSocket-specific event
    hooks, in the order they fire over a connection's lifetime.

    Each method body is intentionally empty; the docstrings describe when
    mitmproxy invokes the hook.
    """

    # WebSocket lifecycle
    def websocket_handshake(self, flow: mitmproxy.http.HTTPFlow):
        """
        Called when a client wants to establish a WebSocket connection. The
        WebSocket-specific headers can be manipulated to alter the
        handshake. The flow object is guaranteed to have a non-None request
        attribute.
        """

    def websocket_start(self, flow: mitmproxy.websocket.WebSocketFlow):
        """
        A WebSocket connection has commenced.
        """

    def websocket_message(self, flow: mitmproxy.websocket.WebSocketFlow):
        """
        Called when a WebSocket message is received from the client or
        server. The most recent message will be flow.messages[-1]. The
        message is user-modifiable. Currently there are two types of
        messages, corresponding to the BINARY and TEXT frame types.
        """

    def websocket_error(self, flow: mitmproxy.websocket.WebSocketFlow):
        """
        A WebSocket connection has had an error.
        """

    def websocket_end(self, flow: mitmproxy.websocket.WebSocketFlow):
        """
        A WebSocket connection has ended.
        """
nikpap/inspire-next | refs/heads/master | inspirehep/modules/forms/bundles.py | 2 | # -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014, 2015 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""Bundles for forms used across INSPIRE."""
from invenio_assets import NpmBundle
from invenio_assets.filters import RequireJSFilter
from inspirehep.modules.theme.bundles import js as _js
# JavaScript bundle for the INSPIRE submission/deposit forms.  The
# RequireJS filter excludes the theme's base bundle so shared modules are
# not packaged twice.
js = NpmBundle(
    "js/forms/inspire-form-init.js",
    output="gen/inspire-form.%(version)s.js",
    filters=RequireJSFilter(exclude=[_js]),
    npm={
        "eonasdan-bootstrap-datetimepicker": "~4.15.35",
        "typeahead.js": "~0.10.5",
        "bootstrap-multiselect": "~0.9.13",
        "moment": "~2.11.2",
    }
)

# Matching CSS bundle: the forms' own SCSS plus the stylesheets shipped by
# the npm widgets used above (datetimepicker, typeahead, multiselect).
css = NpmBundle(
    "scss/forms/form.scss",
    "node_modules/eonasdan-bootstrap-datetimepicker/build/css/bootstrap-datetimepicker.css",
    "node_modules/typeahead.js-bootstrap-css/typeaheadjs.css",
    "node_modules/bootstrap-multiselect/dist/css/bootstrap-multiselect.css",
    output='gen/inspire-form.%(version)s.css',
    depends='scss/forms/*.scss',
    filters="scss, cleancss",
    npm={
        "typeahead.js-bootstrap-css": "~1.2.1"
    }
)
|
alanjw/GreenOpenERP-Win-X86 | refs/heads/7.0 | python/Lib/distutils/file_util.py | 55 | """distutils.file_util
Utility functions for operating on single files.
"""
__revision__ = "$Id$"
import os
from distutils.errors import DistutilsFileError
from distutils import log
# for generating verbose output in 'copy_file()'
_copy_action = {None: 'copying',
'hard': 'hard linking',
'sym': 'symbolically linking'}
def _copy_file_contents(src, dst, buffer_size=16*1024):
    """Copy the file 'src' to 'dst'.

    Both must be filenames. Any error opening either file, reading from
    'src', or writing to 'dst', raises DistutilsFileError. Data is
    read/written in chunks of 'buffer_size' bytes (default 16k). No attempt
    is made to handle anything apart from regular files.

    NOTE: this is legacy Python 2 code (``except exc, target`` syntax).
    """
    # Stolen from shutil module in the standard library, but with
    # custom error-handling added.
    fsrc = None
    fdst = None
    try:
        try:
            fsrc = open(src, 'rb')
        except os.error, (errno, errstr):
            raise DistutilsFileError("could not open '%s': %s" % (src, errstr))

        # Unlink an existing destination first so the new file gets fresh
        # mode/ownership instead of inheriting the old file's.
        if os.path.exists(dst):
            try:
                os.unlink(dst)
            except os.error, (errno, errstr):
                raise DistutilsFileError(
                    "could not delete '%s': %s" % (dst, errstr))

        try:
            fdst = open(dst, 'wb')
        except os.error, (errno, errstr):
            raise DistutilsFileError(
                "could not create '%s': %s" % (dst, errstr))

        while 1:
            try:
                buf = fsrc.read(buffer_size)
            except os.error, (errno, errstr):
                raise DistutilsFileError(
                    "could not read from '%s': %s" % (src, errstr))

            # An empty read means end-of-file.
            if not buf:
                break

            try:
                fdst.write(buf)
            except os.error, (errno, errstr):
                raise DistutilsFileError(
                    "could not write to '%s': %s" % (dst, errstr))
    finally:
        # Close whichever handles were successfully opened.
        if fdst:
            fdst.close()
        if fsrc:
            fsrc.close()
def copy_file(src, dst, preserve_mode=1, preserve_times=1, update=0,
              link=None, verbose=1, dry_run=0):
    """Copy a file 'src' to 'dst'.

    If 'dst' is a directory, then 'src' is copied there with the same name;
    otherwise, it must be a filename. (If the file exists, it will be
    ruthlessly clobbered.) If 'preserve_mode' is true (the default),
    the file's mode (type and permission bits, or whatever is analogous on
    the current platform) is copied. If 'preserve_times' is true (the
    default), the last-modified and last-access times are copied as well.
    If 'update' is true, 'src' will only be copied if 'dst' does not exist,
    or if 'dst' does exist but is older than 'src'.

    'link' allows you to make hard links (os.link) or symbolic links
    (os.symlink) instead of copying: set it to "hard" or "sym"; if it is
    None (the default), files are copied. Don't set 'link' on systems that
    don't support it: 'copy_file()' doesn't check if hard or symbolic
    linking is available.

    Under Mac OS, uses the native file copy function in macostools; on
    other systems, uses '_copy_file_contents()' to copy file contents.

    Return a tuple (dest_name, copied): 'dest_name' is the actual name of
    the output file, and 'copied' is true if the file was copied (or would
    have been copied, if 'dry_run' true).
    """
    # XXX if the destination file already exists, we clobber it if
    # copying, but blow up if linking. Hmmm. And I don't know what
    # macostools.copyfile() does. Should definitely be consistent, and
    # should probably blow up if destination exists and we would be
    # changing it (ie. it's not already a hard/soft link to src OR
    # (not update) and (src newer than dst).

    from distutils.dep_util import newer
    from stat import ST_ATIME, ST_MTIME, ST_MODE, S_IMODE

    if not os.path.isfile(src):
        raise DistutilsFileError(
            "can't copy '%s': doesn't exist or not a regular file" % src)

    # Resolve a directory destination to a full file path; remember the
    # directory for the log message below.
    if os.path.isdir(dst):
        dir = dst
        dst = os.path.join(dst, os.path.basename(src))
    else:
        dir = os.path.dirname(dst)

    if update and not newer(src, dst):
        if verbose >= 1:
            log.debug("not copying %s (output up-to-date)", src)
        return dst, 0

    try:
        action = _copy_action[link]
    except KeyError:
        raise ValueError("invalid value '%s' for 'link' argument" % link)

    if verbose >= 1:
        if os.path.basename(dst) == os.path.basename(src):
            log.info("%s %s -> %s", action, src, dir)
        else:
            log.info("%s %s -> %s", action, src, dst)

    if dry_run:
        return (dst, 1)

    # If linking (hard or symbolic), use the appropriate system call
    # (Unix only, of course, but that's the caller's responsibility)
    if link == 'hard':
        if not (os.path.exists(dst) and os.path.samefile(src, dst)):
            os.link(src, dst)
    elif link == 'sym':
        if not (os.path.exists(dst) and os.path.samefile(src, dst)):
            os.symlink(src, dst)

    # Otherwise (non-Mac, not linking), copy the file contents and
    # (optionally) copy the times and mode.
    else:
        _copy_file_contents(src, dst)
        if preserve_mode or preserve_times:
            st = os.stat(src)

            # According to David Ascher <da@ski.org>, utime() should be done
            # before chmod() (at least under NT).
            if preserve_times:
                os.utime(dst, (st[ST_ATIME], st[ST_MTIME]))
            if preserve_mode:
                os.chmod(dst, S_IMODE(st[ST_MODE]))

    return (dst, 1)
# XXX I suspect this is Unix-specific -- need porting help!
def move_file (src, dst, verbose=1, dry_run=0):
    """Move a file 'src' to 'dst'.

    If 'dst' is a directory, the file will be moved into it with the same
    name; otherwise, 'src' is just renamed to 'dst'. Return the new
    full name of the file.

    Handles cross-device moves on Unix using 'copy_file()'. What about
    other systems???
    """
    from os.path import exists, isfile, isdir, basename, dirname
    import errno

    if verbose >= 1:
        log.info("moving %s -> %s", src, dst)

    if dry_run:
        return dst

    if not isfile(src):
        raise DistutilsFileError("can't move '%s': not a regular file" % src)

    if isdir(dst):
        dst = os.path.join(dst, basename(src))
    elif exists(dst):
        raise DistutilsFileError(
            "can't move '%s': destination '%s' already exists" %
            (src, dst))

    if not isdir(dirname(dst)):
        raise DistutilsFileError(
            "can't move '%s': destination '%s' not a valid path" % \
            (src, dst))

    # Try the cheap in-place rename first; EXDEV means src and dst live on
    # different filesystems, so fall back to copy-then-delete.
    copy_it = 0
    try:
        os.rename(src, dst)
    except os.error, (num, msg):
        if num == errno.EXDEV:
            copy_it = 1
        else:
            raise DistutilsFileError(
                "couldn't move '%s' to '%s': %s" % (src, dst, msg))

    if copy_it:
        copy_file(src, dst, verbose=verbose)
        try:
            os.unlink(src)
        except os.error, (num, msg):
            # Deleting the source failed: roll back the copy so we don't
            # leave two files behind, then report the failure.
            try:
                os.unlink(dst)
            except os.error:
                pass
            raise DistutilsFileError(
                ("couldn't move '%s' to '%s' by copy/delete: " +
                 "delete '%s' failed: %s") %
                (src, dst, src, msg))

    return dst
def write_file (filename, contents):
    """Create a file with the specified name and write 'contents' (a
    sequence of strings without line terminators) to it.

    The file is truncated if it already exists and is always closed,
    even if a write fails (the 'with' statement replaces the previous
    manual try/finally).
    """
    with open(filename, "w") as f:
        for line in contents:
            f.write(line + "\n")
|
kastnerkyle/pylearn2 | refs/heads/master | pylearn2/expr/normalize.py | 5 | """
Code for normalizing outputs of MLP / convnet layers.
"""
__authors__ = "Ian Goodfellow and David Warde-Farley"
__copyright__ = "Copyright 2013, Universite de Montreal"
__credits__ = ["Ian Goodfellow and David Warde-Farley"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
import theano.tensor as T
from pylearn2.sandbox.cuda_convnet.response_norm import CrossMapNorm
class CrossChannelNormalizationBC01(object):
    """
    BC01 (batch, channel, row, column) axis-order version of
    CrossChannelNormalization.

    Builds a theano symbolic expression; nothing is computed eagerly.

    Parameters
    ----------
    alpha : float
        Scale applied to the sum of squared activations of the
        neighbouring channels.
    k : float
        Additive constant in the normalization denominator.
    beta : float
        Exponent applied to the accumulated scale.
    n : int
        Number of adjacent channels summed over; must be odd.
    """

    def __init__(self, alpha = 1e-4, k=2, beta=0.75, n=5):
        self.__dict__.update(locals())
        del self.self

        if n % 2 == 0:
            raise NotImplementedError("Only works with odd n for now")

    def __call__(self, bc01):
        """
        Return the symbolic cross-channel-normalized version of `bc01`.

        NOTE: legacy Python 2 code (`xrange`).
        """
        half = self.n // 2

        sq = T.sqr(bc01)

        b, ch, r, c = bc01.shape

        # Zero-pad the channel axis by `half` on each side so the sliding
        # window below never indexes out of bounds.
        extra_channels = T.alloc(0., b, ch + 2*half, r, c)

        sq = T.set_subtensor(extra_channels[:,half:half+ch,:,:], sq)

        scale = self.k

        # Accumulate the n shifted squared maps: for each output channel
        # this sums the squares of its n neighbouring channels.
        for i in xrange(self.n):
            scale += self.alpha * sq[:,i:i+ch,:,:]

        scale = scale ** self.beta

        return bc01 / scale
class CrossChannelNormalization(object):
    """
    See "ImageNet Classification with Deep Convolutional Neural Networks"
    Alex Krizhevsky, Ilya Sutskever, and Geoffrey E. Hinton
    NIPS 2012
    Section 3.3, Local Response Normalization

    Operates on c01b (channel, row, column, batch) ordered tensors and
    builds a theano symbolic expression:

    f(c01b)_[i,j,k,l] = c01b[i,j,k,l] / scale[i,j,k,l]
    scale[i,j,k,l] = (k + sqr(c01b)[clip(i-n/2):clip(i+n/2),j,k,l].sum())^beta
    clip(i) = T.clip(i, 0, c01b.shape[0]-1)

    Parameters
    ----------
    alpha : float
        Scale applied to the sum of squared activations of the
        neighbouring channels.
    k : float
        Additive constant in the normalization denominator.
    beta : float
        Exponent applied to the accumulated scale.
    n : int
        Number of adjacent channels summed over; must be odd.
    """

    def __init__(self, alpha = 1e-4, k=2, beta=0.75, n=5):
        self.__dict__.update(locals())
        del self.self

        if n % 2 == 0:
            raise NotImplementedError("Only works with odd n for now")

    def __call__(self, c01b):
        """
        Return the symbolic cross-channel-normalized version of `c01b`.

        NOTE: legacy Python 2 code (`xrange`).
        """
        half = self.n // 2

        sq = T.sqr(c01b)

        ch, r, c, b = c01b.shape

        # Zero-pad the channel axis by `half` on each side so the sliding
        # window below never indexes out of bounds.
        extra_channels = T.alloc(0., ch + 2*half, r, c, b)

        sq = T.set_subtensor(extra_channels[half:half+ch,:,:,:], sq)

        scale = self.k

        # Accumulate the n shifted squared maps: for each output channel
        # this sums the squares of its n neighbouring channels.
        for i in xrange(self.n):
            scale += self.alpha * sq[i:i+ch,:,:,:]

        scale = scale ** self.beta

        return c01b / scale
class CudaConvNetCrossChannelNormalization(object):
    """
    Cross-channel normalization implemented via cuda-convnet's
    CrossMapNorm op (wraps a single CrossMapNorm instance).

    I kept the same parameter names where I was sure they
    actually are the same parameters (with respect to
    CrossChannelNormalization).

    Parameters
    ----------
    alpha : float
        Additive scale passed to CrossMapNorm as ``add_scale``.
    beta : float
        Power passed to CrossMapNorm as ``pow_scale``.
    size_f : int
        Normalization window size passed through to CrossMapNorm.
    blocked : bool
        Passed through to CrossMapNorm's ``blocked`` option.
    """
    def __init__(self, alpha=1e-4, beta=0.75, size_f=5, blocked=True):
        # Build the op once; __call__ only applies it.
        self._op = CrossMapNorm(size_f=size_f, add_scale=alpha,
                                pow_scale=beta, blocked=blocked)

    def __call__(self, c01b):
        """
        Apply the normalization op and return its first output.

        NOTE: c01b must be CudaNdarrayType."""
        return self._op(c01b)[0]
|
jonlooney/jxmlease | refs/heads/master | jxmlease/dictnode.py | 1 | #!/usr/bin/env python
# Copyright (c) 2015-2016, Juniper Networks, Inc.
# All rights reserved.
#
# Copyright (C) 2012 Martin Blech and individual contributors.
#
# See the LICENSE file for further information.
"""Module that provides the XMLDictNode class."""
from __future__ import absolute_import
from xml.sax.xmlreader import AttributesImpl
from copy import copy
from . import _node_refs, OrderedDict, pprint, _unicode
from . import _XMLCDATAPlaceholder, _XMLListPlaceholder
from ._basenode import _common_docstring, _docstring_fixup, XMLNodeBase
__all__ = ['XMLDictNode']
# Until the first node instance is created, these names point at
# lightweight placeholders; _resolve_references_once() swaps in the
# real classes from _node_refs.
XMLCDATANode = _XMLCDATAPlaceholder
XMLListNode = _XMLListPlaceholder

def _resolve_references_once():
    """Internal function to resolve late references.
    There are circular references between the three node types. If we try
    to import all of the references into each module at parse time, the
    parser (rightly) complains about an infinite loop. This function "solves"
    that by doing a one-time load of the symbols the first time an
    instance of the class is changed. The function then replaces its own
    name in the module symbol table with a lambda function to turn this
    into a no-op.
    """
    # pylint: disable=global-statement
    # pylint: disable=invalid-name
    global XMLCDATANode
    global XMLListNode
    global _resolve_references
    XMLCDATANode = _node_refs['XMLCDATANode']
    XMLListNode = _node_refs['XMLListNode']
    # Self-disable: subsequent calls through _resolve_references do nothing.
    _resolve_references = lambda: None

# Callers invoke _resolve_references(); after the first call it becomes a no-op.
_resolve_references = _resolve_references_once
class XMLDictNode(XMLNodeBase, OrderedDict):
    """(docstring to be replaced by __doc__)"""
    __doc__ = _common_docstring("XMLDictNode")

    def __new__(cls, *args, **kwargs):
        # Resolve the circular XMLCDATANode/XMLListNode references the
        # first time any node is created (no-op afterwards).
        _resolve_references()
        return super(XMLDictNode, cls).__new__(cls, *args, **kwargs)

    def __init__(self, *args, **kwargs):
        super(XMLDictNode, self).__init__(*args, **kwargs)
        # Remember the class name so _check_replacement() can detect a
        # superseded node object.
        self.__const_class_name__ = self.__class__.__name__
        # When True, this node is a transparent wrapper: emit/find
        # operations act only on the children.
        self._ignore_level = False

    def add_node(self, tag, key=None, text=_unicode(), new_node=None,
                 update=True, **kwargs):
        """Add a child node under ``key`` (defaulting to ``tag``).

        If ``new_node`` is None, a new XMLCDATANode is built from ``text``.
        If an entry with the same key already exists, the entry is
        converted to an XMLListNode (if it is not one already) and the
        new node is appended to it.  When ``update`` is True, the new
        node's tag/key/parent bookkeeping is kept in sync.  Returns the
        node that was added.
        """
        self._check_replacement()
        if new_node is None:
            # By default, we create a CDATA node.
            new_node = XMLCDATANode(text, tag=tag, **kwargs)
            if key:
                if update:
                    new_node.key = copy(key)
            else:
                key = tag
        else:
            if not isinstance(new_node, XMLNodeBase):
                raise TypeError("'new_node' argument must be a subclass of "
                                "XMLNodeBase, not '%s'"
                                % (type(new_node).__name__))
            if key:
                if update:
                    # NOTE(review): unlike the new-node branch above, this
                    # assigns the caller's key without copy() -- confirm
                    # whether a copy was intended here as well.
                    new_node.key = (key)
            elif new_node.key:
                key = new_node.key
            else:
                key = tag
            if update:
                new_node.tag = tag
        # Let's see if we already have an entry with this key. If so,
        # it needs to be a list.
        if key in self:
            # Make it a list, if not already.
            if not isinstance(self[key], XMLListNode):
                old_node = self[key]
                self[key] = XMLListNode([old_node], tag=tag, key=key,
                                        parent=self, convert=False)
                if update:
                    old_node.parent = self[key]
                del old_node
            # Add the new node to the list.
            if update:
                new_node.parent = self[key]
            self[key].append(new_node)
        else:
            # Add to the dictionary.
            if update:
                new_node.parent = self
            self[key] = new_node
        return new_node

    def standardize(self, deep=True):
        """Convert plain child values into XML node objects in place.

        dicts become XMLDictNodes, lists become XMLListNodes, and
        everything else becomes an XMLCDATANode.  Children that are
        already nodes get their tag/key/parent bookkeeping refreshed and,
        when ``deep`` is True, are standardized recursively.
        """
        for k in self:
            node = self[k]
            if not isinstance(node, XMLNodeBase):
                # Set the key and parent. Assume the tag is the same
                # as the key.
                # If we were told to do a deep conversion, then convert
                # the child; otherwise, don't.
                kwargs = dict(convert=deep, deep=deep, tag=k, key=k,
                              parent=self)
                # Convert dicts to XMLDictNodes.
                # Convert lists to XMLListNodes.
                # Convert everything else to an XMLCDATANode with
                # a best guess for the correct string value.
                if isinstance(node, (OrderedDict, dict)):
                    self[k] = XMLDictNode(node, **kwargs)
                elif isinstance(node, list):
                    self[k] = XMLListNode(node, **kwargs)
                else:
                    if node is None:
                        node = _unicode('')
                    elif not isinstance(node, (_unicode, str)):
                        node = _unicode(node)
                    self[k] = XMLCDATANode(node, **kwargs)
            else:
                # Update the internal book-keeping entries that might
                # need to be changed.
                self._check_replacement()
                if not node.tag:
                    node.tag = k
                node.key = k
                node.parent = self
                if deep:
                    node.standardize(deep=deep)

    def _emit_handler(self, content_handler, depth, pretty, newl, indent):
        """Emit this node (and children) as SAX events on content_handler.

        ``pretty`` controls whitespace emission; ``newl`` and ``indent``
        are the newline string and per-level indent string.
        """
        # Special case: If tag is None and depth is 0, then we might be the
        # root container, which is tagless.
        # Special case: If self._ignore_level is True, then we just want to
        # work on the children.
        if (self.tag is None and depth == 0) or self._ignore_level:
            first_element = True
            for k in self:
                if pretty and depth == 0 and not first_element:
                    content_handler.ignorableWhitespace(newl)
                self[k]._emit_handler(content_handler, depth, pretty, newl,
                                      indent)
                first_element = False
            return
        if pretty:
            content_handler.ignorableWhitespace(depth * indent)
        content_handler.startElement(self.tag, AttributesImpl(self.xml_attrs))
        if pretty and len(self) > 0:
            content_handler.ignorableWhitespace(newl)
        for k in self:
            self[k]._emit_handler(content_handler, depth+1, pretty, newl,
                                  indent)
        content_handler.characters(_unicode.strip(self.get_cdata()))
        if pretty and len(self) > 0:
            content_handler.ignorableWhitespace(depth * indent)
        content_handler.endElement(self.tag)
        if pretty and depth > 0:
            content_handler.ignorableWhitespace(newl)

    def prettyprint(self, *args, **kwargs):
        """Pretty-print the node tree as plain dicts.

        At the top-level call (currdepth == 0) the converted structure is
        printed via pprint and nothing is returned; recursive calls return
        the converted sub-structure.  A ``depth`` keyword limits recursion.
        """
        currdepth = kwargs.pop("currdepth", 0)
        depth = kwargs.get("depth", None)
        if depth is not None and depth < currdepth:
            return {}
        # Construct a new item, recursively.
        newdict = dict()
        for (k, v) in self.items():
            if hasattr(v, "prettyprint"):
                newdict[k] = v.prettyprint(*args, currdepth=currdepth+1,
                                           **kwargs)
            else:
                newdict[k] = v
        if currdepth == 0:
            pprint(newdict, *args, **kwargs)
        else:
            return newdict

    def _find_nodes_with_tag(self, tag, recursive=True, top_level=False):
        """Yield nodes whose tag is in ``tag`` (a collection of tag names).

        When ``recursive`` is False, only the first matching level is
        searched.  ``top_level`` marks the (possibly tagless) root.
        """
        # Special case: If tag is None and top_level is True, then
        # we might be the root container, which is tagless.
        # Special case: If self._ignore_level is True, then we just
        # want to work on the children.
        pass_through = self._ignore_level or (self.tag is None and top_level)
        if self.tag in tag and not pass_through:
            matched = True
            yield self
        else:
            matched = False
        if recursive or (top_level and not matched):
            for node in self.values():
                kwargs = {'recursive': recursive}
                # Pass through the top_level arg, if appropriate.
                if pass_through:
                    kwargs['top_level'] = top_level
                for item in node._find_nodes_with_tag(tag, **kwargs):
                    yield item
# Replace the placeholder docstrings on XMLDictNode with the shared text.
_docstring_fixup(XMLDictNode)
|
joshfriend/sqlalchemy-utils | refs/heads/master | sqlalchemy_utils/decorators.py | 2 | from collections import defaultdict
import itertools
import sqlalchemy as sa
import six
from .functions import getdotattr
class AttributeValueGenerator(object):
    """Registry and SQLAlchemy event glue for attribute value generators.

    Functions decorated with :func:`generates` are collected per mapped
    class; during session flush each new/dirty instance has its generated
    attributes recomputed and assigned.
    """
    def __init__(self):
        # (target, event-name, handler) triples used both for listen()
        # and remove(), so registration is reversible.
        self.listener_args = [
            (
                sa.orm.mapper,
                'mapper_configured',
                self.update_generator_registry
            ),
            (
                sa.orm.session.Session,
                'before_flush',
                self.update_generated_properties
            )
        ]
        self.reset()

    def reset(self):
        """Detach any registered listeners and clear the registry."""
        if (
            hasattr(self, 'listeners_registered') and
            self.listeners_registered
        ):
            for args in self.listener_args:
                sa.event.remove(*args)

        self.listeners_registered = False

        # TODO: make the registry a WeakKey dict
        self.generator_registry = defaultdict(list)

    def generator_wrapper(self, func, attr, source):
        """Wrap ``func``, tagging it with the (attr, source) it generates.

        When ``attr`` is an InstrumentedAttribute the wrapper is also
        registered immediately for the attribute's class; a plain string
        attr is picked up later by update_generator_registry.
        """
        def wrapper(self, *args, **kwargs):
            return func(self, *args, **kwargs)

        if isinstance(attr, sa.orm.attributes.InstrumentedAttribute):
            self.generator_registry[attr.class_].append(wrapper)
            wrapper.__generates__ = attr, source
        else:
            wrapper.__generates__ = attr, source
        return wrapper

    def register_listeners(self):
        """Attach the mapper/session listeners (idempotent)."""
        if not self.listeners_registered:
            for args in self.listener_args:
                sa.event.listen(*args)

            self.listeners_registered = True

    def update_generator_registry(self, mapper, class_):
        """
        Adds generator functions to generator_registry.
        """
        for generator in class_.__dict__.values():
            if hasattr(generator, '__generates__'):
                self.generator_registry[class_].append(generator)

    def update_generated_properties(self, session, ctx, instances):
        """Recompute generated attributes for new and dirty instances."""
        for obj in itertools.chain(session.new, session.dirty):
            class_ = obj.__class__
            if class_ in self.generator_registry:
                for func in self.generator_registry[class_]:
                    attr, source = func.__generates__
                    if not isinstance(attr, six.string_types):
                        attr = attr.name
                    if source is None:
                        setattr(obj, attr, func(obj))
                    else:
                        # Resolve a dotted path (e.g. 'section.document')
                        # and pass the resolved object to the generator.
                        setattr(obj, attr, func(obj, getdotattr(obj, source)))
# Module-level default registry shared by all `generates` decorators.
generator = AttributeValueGenerator()
def generates(attr, source=None, generator=generator):
    """
    Decorator marking the wrapped function as a value generator for ``attr``.

    .. deprecated:: 0.28.0
        Use :func:`.observer.observes` instead.

    The decorated function runs during session flush for every new or dirty
    instance of the owning class, and its return value is assigned to
    ``attr``.  ``attr`` may be given either as an instrumented attribute
    (``Article.slug``) or as its string name (``'slug'``); with an
    instrumented attribute the generator may even be defined outside the
    class body.

    Example::

        class Article(Base):
            __tablename__ = 'article'
            id = sa.Column(sa.Integer, primary_key=True)
            name = sa.Column(sa.Unicode(255))
            slug = sa.Column(sa.Unicode(255))

            @generates(slug)
            def _create_slug(self):
                return self.name.lower().replace(' ', '-')

    :param attr: attribute (or attribute name) whose value is generated.
    :param source:
        optional dotted attribute path (e.g. ``'section.document'``); when
        given, the object resolved from that path is passed to the generator
        function as a second argument.
    :param generator:
        the :class:`AttributeValueGenerator` registry to register with
        (defaults to the module-level instance).
    """
    generator.register_listeners()

    def decorator(func):
        return generator.generator_wrapper(func, attr, source)

    return decorator
|
csm-aut/csm | refs/heads/master | csmserver/views/host_dashboard.py | 1 | # =============================================================================
# Copyright (c) 2016, Cisco Systems, Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# =============================================================================
from flask import Blueprint
from flask import request
from flask import abort
from flask import jsonify
from flask import render_template
from flask_login import current_user
from wtforms import Form
from wtforms import StringField
from wtforms import HiddenField
from wtforms import SelectMultipleField
from wtforms.validators import required
from sqlalchemy import and_
from database import DBSession
from common import get_host
from common import get_software_profile_by_id
from common import can_delete_install
from common import can_retrieve_software
from common import delete_install_job_dependencies
from common import get_last_successful_inventory_elapsed_time
from models import logger
from models import Package
from models import SystemOption
from models import Satellite
from models import InstallJob
from models import InstallJobHistory
from models import InventoryJobHistory
from forms import HostScheduleInstallForm
from constants import UNKNOWN
from constants import JobStatus
from constants import PackageState
from constants import InstallAction
from flask_login import login_required
from filters import time_difference_UTC
import collections
import datetime
host_dashboard = Blueprint('host_dashboard', __name__, url_prefix='/host_dashboard')
@host_dashboard.route('/hosts/<hostname>/')
@login_required
def home(hostname):
    """Render the host dashboard page for the given hostname (404 if unknown)."""
    db_session = DBSession()
    host = get_host(db_session, hostname)
    if host is None:
        abort(404)

    # The template receives the scheduling form, the satellite-software
    # dialog form, and the static choice lists it needs for rendering.
    return render_template('host/host_dashboard.html', host=host,
                           form=get_host_schedule_install_form(request),
                           manage_satellite_software_form=ManageSatelliteSoftwareDialogForm(request.form),
                           satellite_install_action=get_satellite_install_action_dict(),
                           system_option=SystemOption.get(db_session),
                           server_time=datetime.datetime.utcnow(),
                           package_states=[PackageState.ACTIVE_COMMITTED,
                                           PackageState.ACTIVE,
                                           PackageState.INACTIVE_COMMITTED,
                                           PackageState.INACTIVE])
def get_satellite_install_action_dict():
    """Map the dialog action names to their satellite install actions."""
    actions = {}
    actions['transfer'] = InstallAction.SATELLITE_TRANSFER
    actions['activate'] = InstallAction.SATELLITE_ACTIVATE
    return actions
def get_host_schedule_install_form(request):
    """Build a schedule-install form bound to the request's POST data."""
    form = HostScheduleInstallForm(request.form)
    return form
@host_dashboard.route('/api/hosts/<hostname>/host_dashboard/cookie')
@login_required
def api_get_host_dashboard_cookie(hostname):
    """Return the host summary ("cookie") row used by the dashboard header.

    Produces a single-row JSON payload with connection parameters,
    software info, last-inventory status, and scheduling permissions.
    An unknown hostname yields an empty data list.
    """
    db_session = DBSession()
    host = get_host(db_session, hostname)

    rows = []
    if host is not None:
        software_profile = get_software_profile_by_id(db_session, host.software_profile_id)
        system_option = SystemOption.get(db_session)
        row = {}
        # Only the first connection parameter set is surfaced here.
        connection_param = host.connection_param[0]
        row['hostname'] = host.hostname
        row['region'] = host.region.name if host.region is not None else UNKNOWN
        row['location'] = host.location
        row['roles'] = host.roles
        row['platform'] = host.platform
        row['software_platform'] = host.software_platform
        row['software_version'] = host.software_version
        row['host_or_ip'] = connection_param.host_or_ip
        row['username'] = connection_param.username
        row['connection'] = connection_param.connection_type
        row['port_number'] = connection_param.port_number
        row['created_by'] = host.created_by
        row['software_profile_name'] = '' if software_profile is None else software_profile.name

        if connection_param.jump_host is not None:
            row['jump_host'] = connection_param.jump_host.hostname

        # Last inventory successful time
        inventory_job = host.inventory_job[0]
        row['last_successful_inventory_elapsed_time'] = get_last_successful_inventory_elapsed_time(host)
        row['last_successful_inventory_time'] = inventory_job.last_successful_time
        row['status'] = inventory_job.status

        row['can_schedule'] = system_option.can_schedule
        row['can_install'] = system_option.can_install
        rows.append(row)

    return jsonify(**{'data': rows})
@host_dashboard.route('/api/hosts/<hostname>/packages/<package_state>')
@login_required
def api_get_host_dashboard_packages(hostname, package_state):
    """Return the host's packages in the given state(s) as JSON rows.

    ``package_state`` may be a comma-delimited list of states; the union
    of the packages in those states is returned.  When any package has
    module-level state information, packages are grouped per module: a
    module row followed by its package rows indented with 7 spaces.
    """
    db_session = DBSession()
    host = get_host(db_session, hostname)

    rows = []
    if host is not None:
        # A comma-delimited state list selects the union of those states.
        packages = []
        for state in package_state.split(','):
            packages.extend(db_session.query(Package).filter(
                and_(Package.host_id == host.id, Package.state == state)).order_by(Package.name).all())

        has_module_packages = any(len(package.modules_package_state) > 0
                                  for package in packages)

        if has_module_packages:
            # Group the qualified package names under their module name.
            module_package_dict = {}
            for package in packages:
                package_name = package.name if package.location is None else package.location + ':' + package.name
                for modules_package_state in package.modules_package_state:
                    module = modules_package_state.module_name
                    module_package_dict.setdefault(module, []).append(package_name)

            # Emit each module row followed by its indented package rows.
            for module, package_list in sorted(module_package_dict.items()):
                rows.append({'package': module})
                for package_name in package_list:
                    rows.append({'package': (' ' * 7) + package_name})
        else:
            for package in packages:
                rows.append({'package': package.name if package.location is None
                             else package.location + ':' + package.name})

    return jsonify(**{'data': rows})
@host_dashboard.route('/api/hosts/<hostname>/scheduled_installs')
@login_required
def api_get_host_dashboard_scheduled_install(hostname):
    """
    Returns scheduled installs for a host in JSON format.
    """
    db_session = DBSession()
    host = get_host(db_session, hostname)

    rows = []
    if host is not None and len(host.install_job) > 0:
        for install_job in host.install_job:
            row = {}
            row['hostname'] = host.hostname
            row['install_job_id'] = install_job.id
            row['install_action'] = install_job.install_action
            row['scheduled_time'] = install_job.scheduled_time
            row['session_log'] = install_job.session_log
            row['status'] = install_job.status
            if install_job.data:
                job_info = install_job.data.get('job_info')
                if job_info:
                    # The client fetches job details by id, so the row
                    # carries the job id rather than the job_info itself.
                    row['job_info'] = install_job.id
            rows.append(row)

    return jsonify(**{'data': rows})
def get_inventory_job_json_dict(inventory_jobs):
    """Serialize inventory jobs into the {'data': rows} dict the tables consume."""
    rows = []
    for job in inventory_jobs:
        row = {
            'hostname': job.host.hostname,
            'status': job.status,
            'status_time': job.status_time,
            'elapsed_time': time_difference_UTC(job.status_time),
            'inventory_job_id': job.id,
        }
        if job.session_log is not None:
            row['session_log'] = job.session_log
        if job.trace is not None:
            # The client fetches the trace by job id.
            row['trace'] = job.id
        rows.append(row)
    return {'data': rows}
@host_dashboard.route('/api/hosts/<hostname>/software_inventory_history', methods=['GET'])
@login_required
def api_get_host_dashboard_software_inventory_history(hostname):
    """Return the host's inventory job history (newest first) as JSON.

    An unknown hostname yields an empty data list.
    """
    rows = []
    db_session = DBSession()

    host = get_host(db_session, hostname)
    if host is not None:
        inventory_jobs = db_session.query(InventoryJobHistory).filter(InventoryJobHistory.host_id == host.id). \
            order_by(InventoryJobHistory.created_time.desc())

        return jsonify(**get_inventory_job_json_dict(inventory_jobs))

    return jsonify(**{'data': rows})
@host_dashboard.route('/hosts/<hostname>/delete_all_failed_installations/', methods=['DELETE'])
@login_required
def delete_all_failed_installations_for_host(hostname):
    """Delete every FAILED install job belonging to the given host."""
    if can_delete_install(current_user):
        return delete_all_installations_for_host(hostname=hostname, status=JobStatus.FAILED)
    abort(401)
@host_dashboard.route('/hosts/<hostname>/delete_all_scheduled_installations/', methods=['DELETE'])
@login_required
def delete_all_scheduled_installations_for_host(hostname):
    """Delete every SCHEDULED install job belonging to the given host."""
    if can_delete_install(current_user):
        return delete_all_installations_for_host(hostname)
    abort(401)
def delete_all_installations_for_host(hostname, status=JobStatus.SCHEDULED):
    """Delete all of the host's install jobs that are in the given status.

    For FAILED jobs the dependent install-job records are removed as well.
    Returns a JSON status payload; aborts with 401 when the user lacks the
    delete permission and 404 when the host does not exist.
    """
    if not can_delete_install(current_user):
        abort(401)

    db_session = DBSession()

    host = get_host(db_session, hostname)
    if host is None:
        abort(404)

    try:
        install_jobs = db_session.query(InstallJob).filter(
            InstallJob.host_id == host.id, InstallJob.status == status).all()
        if not install_jobs:
            return jsonify(status="No record fits the delete criteria.")

        for install_job in install_jobs:
            db_session.delete(install_job)
            if status == JobStatus.FAILED:
                delete_install_job_dependencies(db_session, install_job.id)

        db_session.commit()

        return jsonify({'status': 'OK'})
    except Exception:
        # A bare `except:` would also swallow SystemExit/KeyboardInterrupt;
        # catch only real errors and log the traceback for diagnosis.
        logger.exception('delete_install_job() hit exception')
        return jsonify({'status': 'Failed: check system logs for details'})
@host_dashboard.route('/api/get_inventory/<hostname>')
@login_required
def get_inventory(hostname):
    """Request a software inventory retrieval for the host.

    Marks the host's inventory job as SCHEDULED unless one is already
    scheduled or in progress.  Aborts with 401 when the user lacks the
    retrieve-software permission.
    """
    if not can_retrieve_software(current_user):
        abort(401)

    db_session = DBSession()
    host = get_host(db_session, hostname)
    if host is not None:
        # Don't re-schedule if a retrieval is already pending or running.
        if host.inventory_job[0].status not in [JobStatus.SCHEDULED, JobStatus.IN_PROGRESS]:
            host.inventory_job[0].status = JobStatus.SCHEDULED
            db_session.commit()
        return jsonify({'status': 'OK'})

    return jsonify({'status': 'Failed'})
@host_dashboard.route('/api/is_host_valid/<hostname>')
@login_required
def api_is_host_valid(hostname):
    """Report whether the given hostname exists in the database."""
    db_session = DBSession()
    found = get_host(db_session, hostname) is not None
    return jsonify({'status': 'OK' if found else 'Failed'})
@host_dashboard.route('/api/hosts/<hostname>/satellites')
@login_required
def api_get_host_satellites(hostname):
    """Return the host's connected, not-up-to-date satellites as JSON rows."""
    rows = []
    db_session = DBSession()

    host = get_host(db_session, hostname)
    if host is not None:
        satellites = db_session.query(Satellite).filter(Satellite.host_id == host.id)
        for satellite in satellites:
            # Only report satellites that are connected but not already
            # running a compatible (latest) software version.
            if satellite.state == 'Connected' and \
                    satellite.remote_version != 'Compatible (latest version)':
                rows.append({
                    'satellite_id': satellite.satellite_id,
                    'type': satellite.type,
                    'state': satellite.state,
                    'install_state': satellite.install_state,
                    'ip_address': satellite.ip_address,
                    'mac_address': satellite.mac_address,
                    'serial_number': satellite.serial_number,
                    'remote_version': satellite.remote_version,
                    'remote_version_details': satellite.remote_version_details,
                    'fabric_links': satellite.fabric_links,
                })

    return jsonify(**{'data': rows})
@host_dashboard.route('/api/hosts/<hostname>/get_satellite_count')
@login_required
def api_get_satellite_count(hostname):
    """Return the number of satellites attached to the host (0 if unknown)."""
    db_session = DBSession()
    host = get_host(db_session, hostname)
    if host is None:
        count = 0
    else:
        count = db_session.query(Satellite).filter(Satellite.host_id == host.id).count()
    return jsonify(**{'data': {'satellite_count': count}})
class ManageSatelliteSoftwareDialogForm(Form):
    """WTForms form backing the "Manage Satellite Software" dialog."""
    # Install action(s) selected in the dialog; choices are populated
    # client-side, hence the single empty placeholder choice here.
    satellite_install_action = SelectMultipleField('Install Action', coerce=str, choices=[('', '')])
    # User-local scheduled time (required) and its hidden UTC equivalent.
    satellite_scheduled_time = StringField('Scheduled Time', [required()])
    satellite_scheduled_time_UTC = HiddenField('Scheduled Time')
|
ninotoshi/tensorflow | refs/heads/master | tensorflow/python/kernel_tests/softplus_op_test.py | 15 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Softplus and SoftplusGrad."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class SoftplusTest(tf.test.TestCase):
  """Tests the Softplus op against a NumPy reference implementation."""

  def _npSoftplus(self, np_features):
    """NumPy reference: softplus(x) = log(1 + exp(x)).

    Uses log1p for better accuracy when exp(x) is small.
    """
    return np.log1p(np.exp(np_features))

  def _testSoftplus(self, np_features, use_gpu=False):
    """Compares tf.nn.softplus against the NumPy reference on one input."""
    np_softplus = self._npSoftplus(np_features)
    with self.test_session(use_gpu=use_gpu):
      softplus = tf.nn.softplus(np_features)
      tf_softplus = softplus.eval()
    self.assertAllClose(np_softplus, tf_softplus)
    self.assertShapeEqual(np_softplus, softplus)

  def testNumbers(self):
    # `np.float` was a deprecated alias for the builtin float (i.e.
    # float64) and was removed in NumPy 1.24; use np.float64 explicitly.
    for t in [np.float64, np.double]:
      self._testSoftplus(
          np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
          use_gpu=False)
      self._testSoftplus(
          np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
          use_gpu=True)

  def testGradient(self):
    # Numerically checks d softplus / dx on a small grid of points.
    with self.test_session():
      x = tf.constant(
          [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
          shape=[2, 5], name="x")
      y = tf.nn.softplus(x, name="softplus")
      x_init = np.asarray(
          [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
          dtype=np.float32, order="F")
      err = tf.test.compute_gradient_error(x,
                                           [2, 5],
                                           y,
                                           [2, 5],
                                           x_init_value=x_init)
    print("softplus (float) gradient err = ", err)
    self.assertLess(err, 1e-4)
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
  tf.test.main()
|
diedthreetimes/VCrash | refs/heads/master | src/visualizer/visualizer/hud.py | 189 | import goocanvas
import core
import math
import pango
import gtk
class Axes(object):
    """
    Draws horizontal and vertical axis rulers, with tick labels in
    simulation meters, on the visualizer canvas, keeping them in sync
    with scrolling and zooming.
    """

    def __init__(self, viz):
        """viz: visualizer object exposing ``canvas``, ``zoom`` and the
        GTK h/v adjustments used for scrolling."""
        self.viz = viz
        self.color = 0x8080C0FF
        self.hlines = goocanvas.Path(parent=viz.canvas.get_root_item(), stroke_color_rgba=self.color)
        self.hlines.lower(None)
        self.vlines = goocanvas.Path(parent=viz.canvas.get_root_item(), stroke_color_rgba=self.color)
        self.vlines.lower(None)
        self.labels = []
        hadj = self.viz.get_hadjustment()
        vadj = self.viz.get_vadjustment()
        def update(adj):
            # Redraw whenever the visible viewport changes.
            if self.visible:
                self.update_view()
        hadj.connect("value-changed", update)
        vadj.connect("value-changed", update)
        hadj.connect("changed", update)
        vadj.connect("changed", update)
        self.visible = True
        self.update_view()

    def set_visible(self, visible):
        """Show or hide the axes; hidden tick labels are kept for reuse."""
        self.visible = visible
        if self.visible:
            self.hlines.props.visibility = goocanvas.ITEM_VISIBLE
            self.vlines.props.visibility = goocanvas.ITEM_VISIBLE
        else:
            self.hlines.props.visibility = goocanvas.ITEM_HIDDEN
            self.vlines.props.visibility = goocanvas.ITEM_HIDDEN
            for label in self.labels:
                label.props.visibility = goocanvas.ITEM_HIDDEN

    def _compute_divisions(self, xi, xf):
        """
        Pick a starting coordinate and division (tick spacing) for the
        interval [xi, xf], favouring "round" powers of 10 or their
        halves/doubles.  Returns (x0, div).
        """
        assert xf > xi
        dx = xf - xi
        size = dx
        ndiv = 5
        text_width = dx/ndiv/2

        def rint(x):
            return math.floor(x+0.5)

        dx_over_ndiv = dx / ndiv
        for n in range(5): # iterate 5 times to find optimum division size
            #/* div: length of each division */
            tbe = math.log10(dx_over_ndiv)#; /* looking for approx. 'ndiv' divisions in a length 'dx' */
            div = pow(10, rint(tbe))#; /* div: power of 10 closest to dx/ndiv */
            if math.fabs(div/2 - dx_over_ndiv) < math.fabs(div - dx_over_ndiv): #/* test if div/2 is closer to dx/ndiv */
                div /= 2
            elif math.fabs(div*2 - dx_over_ndiv) < math.fabs(div - dx_over_ndiv):
                div *= 2 # /* test if div*2 is closer to dx/ndiv */
            x0 = div*math.ceil(xi / div) - div
            if n > 1:
                ndiv = rint(size / text_width)
            # NOTE(review): this return sits inside the loop, so only the
            # first iteration ever runs despite the comment above --
            # confirm whether the early return is intentional.
            return x0, div

    def update_view(self):
        """Recompute and redraw both axes for the current viewport/zoom."""
        if self.viz.zoom is None:
            return

        unused_labels = self.labels
        self.labels = []
        for label in unused_labels:
            label.set_property("visibility", goocanvas.ITEM_HIDDEN)
        def get_label():
            # Reuse a hidden label when available, else create a new one.
            try:
                label = unused_labels.pop(0)
            except IndexError:
                label = goocanvas.Text(parent=self.viz.canvas.get_root_item(), stroke_color_rgba=self.color)
            else:
                label.set_property("visibility", goocanvas.ITEM_VISIBLE)
            label.lower(None)
            self.labels.append(label)
            return label

        hadj = self.viz.get_hadjustment()
        vadj = self.viz.get_vadjustment()
        zoom = self.viz.zoom.value
        offset = 10/zoom

        # Viewport corners in canvas coordinates.
        x1, y1 = self.viz.canvas.convert_from_pixels(hadj.value, vadj.value)
        x2, y2 = self.viz.canvas.convert_from_pixels(hadj.value + hadj.page_size, vadj.value + vadj.page_size)
        line_width = 5.0/self.viz.zoom.value

        # draw the horizontal axis
        self.hlines.set_property("line-width", line_width)
        yc = y2 - line_width/2

        sim_x1 = x1/core.PIXELS_PER_METER
        sim_x2 = x2/core.PIXELS_PER_METER
        x0, xdiv = self._compute_divisions(sim_x1, sim_x2)
        path = ["M %r %r L %r %r" % (x1, yc, x2, yc)]
        x = x0
        while x < sim_x2:
            path.append("M %r %r L %r %r" % (core.PIXELS_PER_METER*x, yc - offset, core.PIXELS_PER_METER*x, yc))
            label = get_label()
            label.set_properties(font=("Sans Serif %f" % int(12/zoom)),
                                 text=("%G" % x),
                                 fill_color_rgba=self.color,
                                 alignment=pango.ALIGN_CENTER,
                                 anchor=gtk.ANCHOR_S,
                                 x=core.PIXELS_PER_METER*x,
                                 y=(yc - offset))
            x += xdiv
        del x
        self.hlines.set_property("data", " ".join(path))

        # draw the vertical axis
        self.vlines.set_property("line-width", line_width)
        xc = x1 + line_width/2

        sim_y1 = y1/core.PIXELS_PER_METER
        sim_y2 = y2/core.PIXELS_PER_METER
        y0, ydiv = self._compute_divisions(sim_y1, sim_y2)
        path = ["M %r %r L %r %r" % (xc, y1, xc, y2)]
        y = y0
        while y < sim_y2:
            path.append("M %r %r L %r %r" % (xc, core.PIXELS_PER_METER*y, xc + offset, core.PIXELS_PER_METER*y))
            label = get_label()
            label.set_properties(font=("Sans Serif %f" % int(12/zoom)),
                                 text=("%G" % y),
                                 fill_color_rgba=self.color,
                                 alignment=pango.ALIGN_LEFT,
                                 anchor=gtk.ANCHOR_W,
                                 x=xc + offset,
                                 y=core.PIXELS_PER_METER*y)
            y += ydiv

        self.vlines.set_property("data", " ".join(path))

        self.labels.extend(unused_labels)
|
h2oai/h2o-3 | refs/heads/master | h2o-py/tests/pyunit_utils/utilsPY.py | 1 | # Py2 compat
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from past.builtins import basestring
# standard lib
import copy
import datetime
from decimal import *
from functools import reduce
import imp
import json
import math
import os
import random
import re
import shutil
import string
import subprocess
from subprocess import STDOUT,PIPE
import sys
import time # needed to randomly generate time
import threading
import urllib.request, urllib.error, urllib.parse
import uuid # call uuid.uuid4() to generate unique uuid numbers
try:
from StringIO import StringIO # py2 (first as py2 also has io.StringIO, but without string support, only unicode)
except:
from io import StringIO # py3
try:
    from tempfile import TemporaryDirectory
except ImportError:
    # Python 2 fallback: tempfile.TemporaryDirectory does not exist,
    # so provide a minimal context-manager shim with the same usage
    # (`with TemporaryDirectory() as d:` yields the directory path).
    import tempfile

    class TemporaryDirectory:
        def __init__(self):
            # Path of the created directory; set on __enter__.
            self.tmp_dir = None

        def __enter__(self):
            self.tmp_dir = tempfile.mkdtemp()
            return self.tmp_dir

        def __exit__(self, *args):
            # Remove the directory and everything inside it.
            shutil.rmtree(self.tmp_dir)
# 3rd parties
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix
import scipy.special
# h2o
sys.path.insert(1, "../../")
import h2o
from h2o.model.binomial import H2OBinomialModel
from h2o.model.clustering import H2OClusteringModel
from h2o.model.multinomial import H2OMultinomialModel
from h2o.model.ordinal import H2OOrdinalModel
from h2o.model.regression import H2ORegressionModel
from h2o.estimators import H2OGradientBoostingEstimator, H2ODeepLearningEstimator, H2OGeneralizedLinearEstimator, \
H2OGeneralizedAdditiveEstimator, H2OKMeansEstimator, H2ONaiveBayesEstimator, H2ORandomForestEstimator, \
H2OPrincipalComponentAnalysisEstimator
from h2o.utils.typechecks import is_type
from h2o.utils.shared_utils import temp_ctr # unused in this file but exposed here for symmetry with rest_ctr
class Timeout:
    """Context manager arming a background timer that invokes ``on_timeout``
    after ``timeout_secs`` seconds unless the managed block exits first.

    A ``None`` or negative ``timeout_secs`` disables the timer entirely.
    """

    def __init__(self, timeout_secs, on_timeout=None):
        if timeout_secs is not None and timeout_secs >= 0:
            self.timer = threading.Timer(timeout_secs, on_timeout)
        else:
            self.timer = None

    def __enter__(self):
        if self.timer is not None:
            self.timer.start()
        return self

    def __exit__(self, *args):
        # Cancel the pending callback; harmless if it already fired.
        if self.timer is not None:
            self.timer.cancel()
class Namespace:
    """
    Simplistic namespace class allowing to create bag/namespace objects that are easily
    extendable in a functional way.
    """

    @staticmethod
    def add(namespace, **kwargs):
        """Mutate ``namespace`` in place with the given attributes and return it."""
        for key, value in kwargs.items():
            setattr(namespace, key, value)
        return namespace

    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    def __str__(self):
        return str(self.__dict__)

    def __repr__(self):
        return repr(self.__dict__)

    def extend(self, **kwargs):
        """
        :param kwargs: attributes extending the current namespace
        :return: a new namespace containing same attributes as the original + the extended ones
        """
        # Copy-then-update keeps the original instance untouched (functional style).
        merged = dict(self.__dict__)
        merged.update(kwargs)
        return Namespace(**merged)
def ns(**kwargs):
    """Shorthand factory: build a :class:`Namespace` from keyword arguments."""
    return Namespace(**kwargs)
def gen_random_uuid(numberUUID):
    """Return a list of ``numberUUID`` freshly generated random UUID4 values."""
    return [uuid.uuid4() for _ in range(numberUUID)]
def gen_random_time(numberTimes, maxtime= datetime.datetime(2080, 8,6,8,14,59), mintime=datetime.datetime(1980, 8,6,6,14,59)):
    '''
    Generate a list of random datetimes drawn uniformly between mintime and maxtime.
    :param numberTimes: number of datetimes to generate
    :param maxtime: upper bound (inclusive) for the generated timestamps
    :param mintime: lower bound (inclusive) for the generated timestamps
    :return: list of datetime.datetime objects of length numberTimes
    '''
    # Work in epoch seconds so a uniform integer draw gives a uniform timestamp.
    mintime_ts = int(time.mktime(mintime.timetuple()))
    maxtime_ts = int(time.mktime(maxtime.timetuple()))
    randomTimes = numberTimes*[None]
    for tindex in range(numberTimes):
        temptime = random.randint(mintime_ts, maxtime_ts)
        # BUGFIX: the original referenced the undefined name `temptimes` here,
        # raising NameError on every call with numberTimes > 0.
        randomTimes[tindex] = datetime.datetime.fromtimestamp(temptime)
    return randomTimes
def check_models(model1, model2, use_cross_validation=False, op='e'):
    """
    Check that the given models are equivalent.
    :param model1:
    :param model2:
    :param use_cross_validation: boolean. if True, use validation metrics to determine model equality. Otherwise, use
    training metrics.
    :param op: comparison operator to use. 'e':==, 'g':>, 'ge':>=
    :return: None. Throw meaningful error messages if the check fails
    """
    # 1. Check model types
    model1_type = model1.__class__.__name__
    # BUGFIX: the original read model1's class twice, so a type mismatch was
    # never detected. Also compare strings with ==, not identity (`is`).
    model2_type = model2.__class__.__name__
    assert model1_type == model2_type, "The model types differ. The first model is of type {0} and the second " \
                                       "models is of type {1}.".format(model1_type, model2_type)
    # 2. Check model metrics
    if isinstance(model1,H2OBinomialModel): # 2a. Binomial
        # F1
        f1_1 = model1.F1(xval=use_cross_validation)
        f1_2 = model2.F1(xval=use_cross_validation)
        if op == 'e': assert f1_1[0][1] == f1_2[0][1], "The first model has an F1 of {0} and the second model has an F1 of " \
                                                       "{1}. Expected the first to be == to the second.".format(f1_1[0][1], f1_2[0][1])
        elif op == 'g': assert f1_1[0][1] > f1_2[0][1], "The first model has an F1 of {0} and the second model has an F1 of " \
                                                        "{1}. Expected the first to be > than the second.".format(f1_1[0][1], f1_2[0][1])
        elif op == 'ge': assert f1_1[0][1] >= f1_2[0][1], "The first model has an F1 of {0} and the second model has an F1 of " \
                                                          "{1}. Expected the first to be >= than the second.".format(f1_1[0][1], f1_2[0][1])
    elif isinstance(model1,H2ORegressionModel): # 2b. Regression
        # MSE
        mse1 = model1.mse(xval=use_cross_validation)
        mse2 = model2.mse(xval=use_cross_validation)
        if op == 'e': assert mse1 == mse2, "The first model has an MSE of {0} and the second model has an MSE of " \
                                           "{1}. Expected the first to be == to the second.".format(mse1, mse2)
        elif op == 'g': assert mse1 > mse2, "The first model has an MSE of {0} and the second model has an MSE of " \
                                            "{1}. Expected the first to be > than the second.".format(mse1, mse2)
        elif op == 'ge': assert mse1 >= mse2, "The first model has an MSE of {0} and the second model has an MSE of " \
                                              "{1}. Expected the first to be >= than the second.".format(mse1, mse2)
    elif isinstance(model1,H2OMultinomialModel) or isinstance(model1,H2OOrdinalModel): # 2c. Multinomial
        # hit-ratio: no comparison implemented yet
        pass
    elif isinstance(model1,H2OClusteringModel): # 2d. Clustering
        # totss
        totss1 = model1.totss(xval=use_cross_validation)
        totss2 = model2.totss(xval=use_cross_validation)
        if op == 'e': assert totss1 == totss2, "The first model has an TOTSS of {0} and the second model has an " \
                                               "TOTSS of {1}. Expected the first to be == to the second.".format(totss1,
                                                                                                                 totss2)
        elif op == 'g': assert totss1 > totss2, "The first model has an TOTSS of {0} and the second model has an " \
                                                "TOTSS of {1}. Expected the first to be > than the second.".format(totss1,
                                                                                                                   totss2)
        elif op == 'ge': assert totss1 >= totss2, "The first model has an TOTSS of {0} and the second model has an " \
                                                  "TOTSS of {1}. Expected the first to be >= than the second." \
                                                  "".format(totss1, totss2)
def check_dims_values(python_obj, h2o_frame, rows, cols, dim_only=False):
    """
    Check that the dimensions and values of the python object and H2OFrame are equivalent. Assumes that the python
    object conforms to the rules specified in the h2o frame documentation.
    :param python_obj: a (nested) list, tuple, dictionary, numpy.ndarray, or pandas.DataFrame
    :param h2o_frame: an H2OFrame
    :param rows: number of rows
    :param cols: number of columns
    :param dim_only: check the dimensions only, skipping the per-cell value comparison
    :return: None (raises AssertionError with a descriptive message on mismatch)
    """
    h2o_rows, h2o_cols = h2o_frame.dim
    assert h2o_rows == rows and h2o_cols == cols, "failed dim check! h2o_rows:{0} rows:{1} h2o_cols:{2} cols:{3}" \
                                                  "".format(h2o_rows, rows, h2o_cols, cols)
    if not dim_only:
        if isinstance(python_obj, (list, tuple)):
            for c in range(cols):
                for r in range(rows):
                    # each row entry may itself be a row (nested list/tuple) or a scalar
                    pval = python_obj[r]
                    if isinstance(pval, (list, tuple)): pval = pval[c]
                    hval = h2o_frame[r, c]
                    # exact match, or tiny tolerance for float round-off through parse
                    assert pval == hval or abs(pval - hval) < 1e-10, \
                        "expected H2OFrame to have the same values as the python object for row {0} " \
                        "and column {1}, but h2o got {2} and python got {3}.".format(r, c, hval, pval)
        elif isinstance(python_obj, dict):
            for r in range(rows):
                for k in list(python_obj.keys()):
                    # a non-iterable dict value is treated as a scalar broadcast down the column
                    pval = python_obj[k][r] if hasattr(python_obj[k],'__iter__') else python_obj[k]
                    hval = h2o_frame[r,k]
                    assert pval == hval, "expected H2OFrame to have the same values as the python object for row {0} " \
                                         "and column {1}, but h2o got {2} and python got {3}.".format(r, k, hval, pval)
def np_comparison_check(h2o_data, np_data, num_elements):
    """
    Check values achieved by h2o against values achieved by numpy.
    Samples ``num_elements`` random cells and asserts they agree to within 1e-5.
    :param h2o_data: an H2OFrame or H2OVec
    :param np_data: a numpy array
    :param num_elements: number of elements to compare
    :return: None (raises AssertionError on mismatch)
    """
    # Check for numpy. BUGFIX/modernization: `imp.find_module` is deprecated and
    # the `imp` module was removed in Python 3.12 — just attempt the import (EAFP).
    try:
        import numpy as np
    except ImportError:
        assert False, "failed comparison check because unable to import numpy"
    rows, cols = h2o_data.dim
    for i in range(num_elements):
        r = random.randint(0, rows - 1)
        c = random.randint(0, cols - 1)
        h2o_val = h2o_data[r, c]
        # 1-D numpy arrays are indexed by row only
        np_val = np_data[r, c] if len(np_data.shape) > 1 else np_data[r]
        if isinstance(np_val, np.bool_): np_val = bool(np_val)  # numpy haz special bool type :(
        assert np.absolute(h2o_val - np_val) < 1e-5, \
            "failed comparison check! h2o computed {0} and numpy computed {1}".format(h2o_val, np_val)
# perform h2o predict and mojo predict. Frames containing h2o prediction is returned and mojo predict are
# returned.
def mojo_predict(model, tmpdir, mojoname, glrmReconstruct=False, get_leaf_node_assignment=False, glrmIterNumber=-1, zipFilePath=None):
    """
    perform h2o predict and mojo predict. Frames containing h2o prediction is returned and mojo predict are returned.
    It is assumed that the input data set is saved as in.csv in tmpdir directory.
    :param model: h2o model where you want to use to perform prediction
    :param tmpdir: directory where your mojo zip files are stored
    :param mojoname: name of your mojo zip file.
    :param glrmReconstruct: True to return reconstructed dataset, else return the x factor.
    :param get_leaf_node_assignment: True to score leaf-node assignments instead of predictions (tree models).
    :param glrmIterNumber: if > 0, forwarded to PredictCsv as --glrmIterNumber.
    :param zipFilePath: optional explicit path to the mojo zip; overrides tmpdir/mojoname.zip.
    :return: the h2o prediction frame and the mojo prediction frame
    """
    newTest = h2o.import_file(os.path.join(tmpdir, 'in.csv'), header=1)   # Make sure h2o and mojo use same in.csv
    predict_h2o = model.predict(newTest)
    # load mojo and have it do predict
    outFileName = os.path.join(tmpdir, 'out_mojo.csv')
    mojoZip = os.path.join(tmpdir, mojoname) + ".zip"
    if not(zipFilePath==None):
        mojoZip = zipFilePath
    # locate directory of genmodel.jar by walking up to the h2o-py checkout root
    genJarDir = str.split(os.path.realpath("__file__"),'/')
    genJarDir = '/'.join(genJarDir[0:genJarDir.index('h2o-py')])    # locate directory of genmodel.jar
    java_cmd = ["java", "-ea", "-cp", os.path.join(genJarDir, "h2o-assemblies/genmodel/build/libs/genmodel.jar"),
                "-Xmx12g", "-XX:MaxPermSize=2g", "-XX:ReservedCodeCacheSize=256m", "hex.genmodel.tools.PredictCsv",
                "--input", os.path.join(tmpdir, 'in.csv'), "--output",
                outFileName, "--mojo", mojoZip, "--decimal"]
    if get_leaf_node_assignment:
        java_cmd.append("--leafNodeAssignment")
        # leaf-node mode: the h2o-side result must come from the same scoring path
        predict_h2o = model.predict_leaf_node_assignment(newTest)
    if glrmReconstruct:  # used for GLRM to grab the x coefficients (factors) instead of the predicted values
        java_cmd.append("--glrmReconstruct")
    if glrmIterNumber > 0:
        java_cmd.append("--glrmIterNumber")
        java_cmd.append(str(glrmIterNumber))
    p = subprocess.Popen(java_cmd, stdout=PIPE, stderr=STDOUT)
    o, e = p.communicate()
    files = os.listdir(tmpdir)
    print("listing files {1} in directory {0}".format(tmpdir, files))
    outfile = os.path.join(tmpdir, 'out_mojo.csv')
    # surface the PredictCsv output when scoring produced no usable file
    if not os.path.exists(outfile) or os.stat(outfile).st_size == 0:
        print("MOJO SCORING FAILED:")
        print("--------------------")
        print(o.decode("utf-8"))
    print("***** importing file {0}".format(outfile))
    pred_mojo = h2o.import_file(outfile, header=1)  # load mojo prediction in
    # to a frame and compare
    if glrmReconstruct or ('glrm' not in model.algo):
        return predict_h2o, pred_mojo
    else:
        # for GLRM x-factor mode, return the input frame id instead of predictions
        return newTest.frame_id, pred_mojo
# perform pojo predict. Frame containing pojo predict is returned.
def pojo_predict(model, tmpdir, pojoname):
    """
    Download the model's POJO, compile it, score tmpdir/in.csv with PredictCsv,
    and return the resulting predictions as an H2OFrame.
    :param model: h2o model used to generate the POJO
    :param tmpdir: working directory; must already contain in.csv
    :param pojoname: Java class name of the POJO (file pojoname.java is expected in tmpdir)
    :return: H2OFrame of POJO predictions loaded from out_pojo.csv
    """
    h2o.download_pojo(model, path=tmpdir)
    h2o_genmodel_jar = os.path.join(tmpdir, "h2o-genmodel.jar")
    java_file = os.path.join(tmpdir, pojoname + ".java")
    in_csv = (os.path.join(tmpdir, 'in.csv'))  # import the test dataset
    print("Compiling Java Pojo")
    javac_cmd = ["javac", "-cp", h2o_genmodel_jar, "-J-Xmx12g", java_file]
    subprocess.check_call(javac_cmd)
    out_pojo_csv = os.path.join(tmpdir, "out_pojo.csv")
    # Java classpath separator differs per platform
    cp_sep = ";" if sys.platform == "win32" else ":"
    java_cmd = ["java", "-ea", "-cp", h2o_genmodel_jar + cp_sep + tmpdir, "-Xmx12g",
                "-XX:ReservedCodeCacheSize=256m", "hex.genmodel.tools.PredictCsv",
                "--pojo", pojoname, "--input", in_csv, "--output", out_pojo_csv, "--decimal"]
    p = subprocess.Popen(java_cmd, stdout=PIPE, stderr=STDOUT)
    o, e = p.communicate()
    print("Java output: {0}".format(o))
    assert os.path.exists(out_pojo_csv), "Expected file {0} to exist, but it does not.".format(out_pojo_csv)
    predict_pojo = h2o.import_file(out_pojo_csv, header=1)
    return predict_pojo
def javapredict(algo, equality, train, test, x, y, compile_only=False, separator=",", setInvNumNA=False,**kwargs):
    """
    Train a model of the given algo, download and compile its POJO, then (unless
    compile_only) score the test set both in H2O and through the POJO's PredictCsv
    driver and assert the two predictions agree.
    :param algo: one of "gbm", "random_forest", "deeplearning", "glm", "gam",
        "naive_bayes", "kmeans", "pca"
    :param equality: "class" or "numeric" — the expected comparison mode
    :param train: training H2OFrame
    :param test: test H2OFrame
    :param x: predictor columns
    :param y: response column (ignored for kmeans/pca)
    :param compile_only: when True, stop after the POJO compiles
    :param separator: field separator written into the PredictCsv input file
    :param setInvNumNA: when True, pass --setConvertInvalidNum to PredictCsv
    :param kwargs: forwarded to the estimator constructor
    :return: None (raises AssertionError/ValueError on failure)
    """
    print("Creating model in H2O")
    if algo == "gbm": model = H2OGradientBoostingEstimator(**kwargs)
    elif algo == "random_forest": model = H2ORandomForestEstimator(**kwargs)
    elif algo == "deeplearning": model = H2ODeepLearningEstimator(**kwargs)
    elif algo == "glm": model = H2OGeneralizedLinearEstimator(**kwargs)
    elif algo == "gam": model = H2OGeneralizedAdditiveEstimator(**kwargs)
    elif algo == "naive_bayes": model = H2ONaiveBayesEstimator(**kwargs)
    elif algo == "kmeans": model = H2OKMeansEstimator(**kwargs)
    elif algo == "pca": model = H2OPrincipalComponentAnalysisEstimator(**kwargs)
    else: raise ValueError
    # unsupervised algos take no response column
    if algo == "kmeans" or algo == "pca": model.train(x=x, training_frame=train)
    else: model.train(x=x, y=y, training_frame=train)
    print(model)
    # HACK: munge model._id so that it conforms to Java class name. For example, change K-means to K_means.
    # TODO: clients should extract Java class name from header.
    regex = re.compile("[+\\-* !@#$%^&()={}\\[\\]|;:'\"<>,.?/]")
    pojoname = regex.sub("_", model._id)
    print("Downloading Java prediction model code from H2O")
    tmpdir = os.path.normpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "results", pojoname))
    os.makedirs(tmpdir)
    h2o.download_pojo(model, path=tmpdir)
    h2o_genmodel_jar = os.path.join(tmpdir, "h2o-genmodel.jar")
    assert os.path.exists(h2o_genmodel_jar), "Expected file {0} to exist, but it does not.".format(h2o_genmodel_jar)
    print("h2o-genmodel.jar saved in {0}".format(h2o_genmodel_jar))
    java_file = os.path.join(tmpdir, pojoname + ".java")
    assert os.path.exists(java_file), "Expected file {0} to exist, but it does not.".format(java_file)
    print("java code saved in {0}".format(java_file))
    print("Compiling Java Pojo")
    javac_cmd = ["javac", "-cp", h2o_genmodel_jar, "-J-Xmx12g", "-J-XX:MaxPermSize=256m", java_file]
    subprocess.check_call(javac_cmd)
    if not compile_only:
        print("Predicting in H2O")
        predictions = model.predict(test)
        predictions.summary()
        predictions.head()
        out_h2o_csv = os.path.join(tmpdir, "out_h2o.csv")
        h2o.download_csv(predictions, out_h2o_csv)
        assert os.path.exists(out_h2o_csv), "Expected file {0} to exist, but it does not.".format(out_h2o_csv)
        print("H2O Predictions saved in {0}".format(out_h2o_csv))
        print("Setting up for Java POJO")
        in_csv = os.path.join(tmpdir, "in.csv")
        h2o.download_csv(test[x], in_csv)
        # hack: the PredictCsv driver can't handle quoted strings, so remove them
        f = open(in_csv, "r+")
        csv = f.read()
        csv = re.sub('\"', "", csv)
        csv = re.sub(",", separator, csv)   # replace with arbitrary separator for input dataset
        f.seek(0)
        f.write(csv)
        f.truncate()
        f.close()
        assert os.path.exists(in_csv), "Expected file {0} to exist, but it does not.".format(in_csv)
        print("Input CSV to PredictCsv saved in {0}".format(in_csv))
        print("Running PredictCsv Java Program")
        out_pojo_csv = os.path.join(tmpdir, "out_pojo.csv")
        # Java classpath separator differs per platform
        cp_sep = ";" if sys.platform == "win32" else ":"
        java_cmd = ["java", "-ea", "-cp", h2o_genmodel_jar + cp_sep + tmpdir, "-Xmx12g", "-XX:MaxPermSize=2g",
                    "-XX:ReservedCodeCacheSize=256m", "hex.genmodel.tools.PredictCsv", "--decimal",
                    "--pojo", pojoname, "--input", in_csv, "--output", out_pojo_csv, "--separator", separator]
        if setInvNumNA:
            java_cmd.append("--setConvertInvalidNum")
        p = subprocess.Popen(java_cmd, stdout=PIPE, stderr=STDOUT)
        o, e = p.communicate()
        print("Java output: {0}".format(o))
        assert os.path.exists(out_pojo_csv), "Expected file {0} to exist, but it does not.".format(out_pojo_csv)
        predictions2 = h2o.upload_file(path=out_pojo_csv)
        print("Pojo predictions saved in {0}".format(out_pojo_csv))
        print("Comparing predictions between H2O and Java POJO")
        # Dimensions
        hr, hc = predictions.dim
        pr, pc = predictions2.dim
        assert hr == pr, "Expected the same number of rows, but got {0} and {1}".format(hr, pr)
        assert hc == pc, "Expected the same number of cols, but got {0} and {1}".format(hc, pc)
        # Value
        if not(equality == "class"or equality == "numeric"):
            raise ValueError
        compare_frames_local(predictions, predictions2, prob=1, tol=1e-4)    # faster frame compare
def javamunge(assembly, pojoname, test, compile_only=False):
    """
    Here's how to use:
      assembly is an already fit H2OAssembly;
      The test set should be used to compare the output here and the output of the POJO.
    Downloads and compiles the munging POJO, then (unless compile_only) runs
    MungeCsv on the test set and asserts the munged output matches assembly.fit(test).
    """
    print("Downloading munging POJO code from H2O")
    tmpdir = os.path.normpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "results", pojoname))
    os.makedirs(tmpdir)
    assembly.to_pojo(pojoname, path=tmpdir, get_jar=True)
    h2o_genmodel_jar = os.path.join(tmpdir, "h2o-genmodel.jar")
    assert os.path.exists(h2o_genmodel_jar), "Expected file {0} to exist, but it does not.".format(h2o_genmodel_jar)
    print("h2o-genmodel.jar saved in {0}".format(h2o_genmodel_jar))
    java_file = os.path.join(tmpdir, pojoname + ".java")
    assert os.path.exists(java_file), "Expected file {0} to exist, but it does not.".format(java_file)
    print("java code saved in {0}".format(java_file))
    print("Compiling Java Pojo")
    javac_cmd = ["javac", "-cp", h2o_genmodel_jar, "-J-Xmx12g", "-J-XX:MaxPermSize=256m", java_file]
    subprocess.check_call(javac_cmd)
    if not compile_only:
        print("Setting up for Java POJO")
        in_csv = os.path.join(tmpdir, "in.csv")
        h2o.download_csv(test, in_csv)
        assert os.path.exists(in_csv), "Expected file {0} to exist, but it does not.".format(in_csv)
        print("Input CSV to mungedCSV saved in {0}".format(in_csv))
        print("Predicting in H2O")
        munged = assembly.fit(test)
        munged.head()
        out_h2o_csv = os.path.join(tmpdir, "out_h2o.csv")
        h2o.download_csv(munged, out_h2o_csv)
        assert os.path.exists(out_h2o_csv), "Expected file {0} to exist, but it does not.".format(out_h2o_csv)
        print("Munged frame saved in {0}".format(out_h2o_csv))
        print("Running PredictCsv Java Program")
        out_pojo_csv = os.path.join(tmpdir, "out_pojo.csv")
        # Java classpath separator differs per platform
        cp_sep = ";" if sys.platform == "win32" else ":"
        java_cmd = ["java", "-ea", "-cp", h2o_genmodel_jar + cp_sep + tmpdir, "-Xmx12g", "-XX:MaxPermSize=2g",
                    "-XX:ReservedCodeCacheSize=256m", "hex.genmodel.tools.MungeCsv", "--header", "--munger", pojoname,
                    "--input", in_csv, "--output", out_pojo_csv]
        print("JAVA COMMAND: " + " ".join(java_cmd))
        p = subprocess.Popen(java_cmd, stdout=PIPE, stderr=STDOUT)
        o, e = p.communicate()
        print("Java output: {0}".format(o))
        assert os.path.exists(out_pojo_csv), "Expected file {0} to exist, but it does not.".format(out_pojo_csv)
        # reuse the test frame's column types so the comparison is type-faithful
        munged2 = h2o.upload_file(path=out_pojo_csv, col_types=test.types)
        print("Pojo predictions saved in {0}".format(out_pojo_csv))
        print("Comparing predictions between H2O and Java POJO")
        # Dimensions
        hr, hc = munged.dim
        pr, pc = munged2.dim
        assert hr == pr, "Expected the same number of rows, but got {0} and {1}".format(hr, pr)
        assert hc == pc, "Expected the same number of cols, but got {0} and {1}".format(hc, pc)
        # Value
        import math
        import numbers
        munged.show()
        munged2.show()
        # cell-by-cell comparison: numerics within 1e-8 (NaN == NaN), everything else exact
        for r in range(hr):
            for c in range(hc):
                hp = munged[r,c]
                pp = munged2[r,c]
                if isinstance(hp, numbers.Number):
                    assert isinstance(pp, numbers.Number)
                    assert (math.fabs(hp-pp) < 1e-8) or (math.isnan(hp) and math.isnan(pp)), "Expected munged rows to be the same for row {0}, but got {1}, and {2}".format(r, hp, pp)
                else:
                    assert hp==pp, "Expected munged rows to be the same for row {0}, but got {1}, and {2}".format(r, hp, pp)
def locate(path):
    """
    Search for a relative path and turn it into an absolute path.
    This is handy when hunting for data files to be passed into h2o and used by import file.
    Note: This function is for unit testing purposes only.
    Parameters
    ----------
    path : str
      Path to search for
    :return: Absolute path if it is found.  None otherwise.
    """
    if (test_is_on_hadoop()):
        # Jenkins jobs create symbolic links to smalldata and bigdata on the machine that starts the test. However,
        # in an h2o multinode hadoop cluster scenario, the clustered machines don't know about the symbolic link.
        # Consequently, `locate` needs to return the actual path to the data on the clustered machines. ALL jenkins
        # machines store smalldata and bigdata in /home/0xdiag/. If ON.HADOOP is set by the run.py, the path arg MUST
        # be an immediate subdirectory of /home/0xdiag/. Moreover, the only guaranteed subdirectories of /home/0xdiag/
        # are smalldata and bigdata.
        p = os.path.realpath(os.path.join("/home/0xdiag/", path))
        if not os.path.exists(p): raise ValueError("File not found: " + path)
        return p
    else:
        # walk up from the current working directory until the relative path resolves
        tmp_dir = os.path.realpath(os.getcwd())
        possible_result = os.path.join(tmp_dir, path)
        try:
            while (True):
                if (os.path.exists(possible_result)):
                    return possible_result
                next_tmp_dir = os.path.dirname(tmp_dir)
                if (next_tmp_dir == tmp_dir):
                    # reached the filesystem root without finding the file
                    raise ValueError("File not found: " + path)
                tmp_dir = next_tmp_dir
                possible_result = os.path.join(tmp_dir, path)
        except ValueError as e:
            # fall back to the public S3 test-data bucket before giving up
            url = "https://h2o-public-test-data.s3.amazonaws.com/{}".format(path)
            if url_exists(url):
                return url
            raise
def url_exists(url):
    """Issue an HTTP HEAD request and report whether ``url`` answers with status 200."""
    request = urllib.request.Request(url, method='HEAD')
    try:
        with urllib.request.urlopen(request) as response:
            return response.status == 200
    except urllib.error.URLError:
        # unreachable host, DNS failure, HTTP error, etc. all count as "does not exist"
        return False
def hadoop_namenode_is_accessible():
    """Return True if the Hadoop namenode web UI (port 50070) answers an HTTP request."""
    url = "http://{0}:50070".format(hadoop_namenode())
    try:
        # BUGFIX: `urllib.urlopen` does not exist on Python 3; the resulting
        # AttributeError was swallowed by the bare except, so this function
        # always returned False. Use urllib.request.urlopen instead.
        urllib.request.urlopen(url)
        internal = True
    except Exception:
        # any failure (connection refused, DNS, HTTP error) means not accessible
        internal = False
    return internal
def test_is_on_hadoop():
if hasattr(sys.modules["tests.pyunit_utils"], '__on_hadoop__'):
return sys.modules["tests.pyunit_utils"].__on_hadoop__
return False
def hadoop_namenode():
    """Resolve the Hadoop namenode host, preferring the NAME_NODE environment variable.

    Falls back to the __hadoop_namenode__ attribute run.py sets on the test module;
    returns None when neither source is available.
    """
    env_node = os.getenv("NAME_NODE")
    if env_node:
        # keep only the short hostname, dropping any domain suffix
        return env_node.split(".")[0]
    module = sys.modules["tests.pyunit_utils"]
    if hasattr(module, '__hadoop_namenode__'):
        return module.__hadoop_namenode__
    return None
def pyunit_exec(test_name):
    """
    Compile and execute the pyunit script at ``test_name``.
    The script runs with __name__ == '__main__' so it behaves exactly as if
    launched via ``python my_test.py``. Note: this executes arbitrary file
    content — only run trusted test scripts through it.
    """
    with open(test_name, "r") as t:
        pyunit = t.read()
    test_path = os.path.abspath(test_name)
    # compile with the real path so tracebacks point at the test file
    pyunit_c = compile(pyunit, test_path, 'exec')
    exec(pyunit_c, dict(__name__='__main__', __file__=test_path))  # forcing module name to ensure that the test behaves the same way as when executed using `python my_test.py`
def standalone_test(test):
    """
    Run a single test callable against a fresh h2o session.
    Connects (or reuses an existing connection), wipes all cluster state with
    remove_all, writes a banner into the h2o logs, then invokes the test.
    :param test: a zero-argument callable implementing the test
    """
    if not h2o.connection() or not h2o.connection().connected:
        print("Creating connection for test %s" % test.__name__)
        h2o.init(strict_version_check=False)
        print("New session: %s" % h2o.connection().session_id)
    # start from a clean cluster so tests don't leak frames/models into each other
    h2o.remove_all()
    h2o.log_and_echo("------------------------------------------------------------")
    h2o.log_and_echo("")
    h2o.log_and_echo("STARTING TEST "+test.__name__)
    h2o.log_and_echo("")
    h2o.log_and_echo("------------------------------------------------------------")
    test()
def run_tests(tests, run_in_isolation=True):
    """Run a (possibly nested) collection of test callables, printing a banner for each.

    :param tests: test callables, or lists/tuples of callables (one level of nesting
        is flattened so suites can be grouped).
    :param run_in_isolation: when True each test runs through :func:`standalone_test`,
        otherwise it is called directly.
    """
    # flatten in case of nested tests/test suites
    flattened = []
    for entry in tests:
        if isinstance(entry, (list, tuple)):
            flattened.extend(entry)
        else:
            flattened.append(entry)
    for test in flattened:
        tag_suffix = " [{}]".format(test.tag) if hasattr(test, 'tag') else ""
        banner = "Running {}{}".format(test.__name__, tag_suffix)
        print("\n" + ('=' * len(banner)) + "\n" + banner)
        if run_in_isolation:
            standalone_test(test)
        else:
            test()
def tag_test(test, tag):
    """Attach ``tag`` to ``test`` (shown in run_tests banners) and return the test.

    A ``None`` tag leaves the test untouched.
    """
    if tag is None:
        return test
    test.tag = tag
    return test
def assert_warn(predicate, message):
    """Soft assertion: print a WARN line instead of failing when ``predicate`` is falsy."""
    if not predicate:
        print("WARN: {}".format(message))
def make_random_grid_space(algo, ncols=None, nrows=None):
"""
Construct a dictionary of the form {gbm_parameter:list_of_values, ...}, which will eventually be passed to
H2OGridSearch to build a grid object. The gbm parameters, and their associated values, are randomly selected.
:param algo: a string {"gbm", "rf", "dl", "km", "glm"} representing the algo dimension of the grid space
:param ncols: Used for mtries selection or k (pca)
:param nrows: Used for k (pca)
:return: a dictionary of parameter_name:list_of_values
"""
grid_space = {}
if algo in ["gbm", "rf"]:
if random.randint(0,1): grid_space['ntrees'] = random.sample(list(range(1,6)),random.randint(2,3))
if random.randint(0,1): grid_space['max_depth'] = random.sample(list(range(1,6)),random.randint(2,3))
if random.randint(0,1): grid_space['min_rows'] = random.sample(list(range(1,11)),random.randint(2,3))
if random.randint(0,1): grid_space['nbins'] = random.sample(list(range(2,21)),random.randint(2,3))
if random.randint(0,1): grid_space['nbins_cats'] = random.sample(list(range(2,1025)),random.randint(2,3))
if algo == "gbm":
if random.randint(0,1): grid_space['learn_rate'] = [random.random() for _ in range(random.randint(2,3))]
grid_space['distribution'] = random.sample(['bernoulli', 'multinomial', 'gaussian', 'poisson', 'tweedie', 'gamma'], 1)
if algo == "rf":
if random.randint(0,1): grid_space['mtries'] = random.sample(list(range(1,ncols+1)),random.randint(2,3))
if random.randint(0,1): grid_space['sample_rate'] = [random.random() for r in range(random.randint(2,3))]
elif algo == "km":
grid_space['k'] = random.sample(list(range(1,10)),random.randint(2,3))
if random.randint(0,1): grid_space['max_iterations'] = random.sample(list(range(1,1000)),random.randint(2,3))
if random.randint(0,1): grid_space['standardize'] = [True, False]
if random.randint(0,1): grid_space['seed'] = random.sample(list(range(1,1000)),random.randint(2,3))
if random.randint(0,1): grid_space['init'] = random.sample(['Random','PlusPlus','Furthest'],random.randint(2,3))
elif algo == "glm":
if random.randint(0,1): grid_space['alpha'] = [random.random() for r in range(random.randint(2,3))]
grid_space['family'] = random.sample(['binomial','gaussian','poisson','tweedie','gamma'], 1)
if grid_space['family'] == "tweedie":
if random.randint(0,1):
grid_space['tweedie_variance_power'] = [round(random.random()+1,6) for r in range(random.randint(2,3))]
grid_space['tweedie_link_power'] = 1 - grid_space['tweedie_variance_power']
elif algo == "dl":
if random.randint(0,1): grid_space['activation'] = \
random.sample(["Rectifier", "Tanh", "TanhWithDropout", "RectifierWithDropout", "MaxoutWithDropout"],
random.randint(2,3))
if random.randint(0,1): grid_space['l2'] = [0.001*random.random() for r in range(random.randint(2,3))]
grid_space['distribution'] = random.sample(['bernoulli','multinomial','gaussian','poisson','tweedie','gamma'],1)
return grid_space
elif algo == "naiveBayes":
grid_space['laplace'] = 0
if random.randint(0,1): grid_space['laplace'] = [round(random.random() + r, 6) for r in random.sample(list(range(0,11)), random.randint(2,3))]
if random.randint(0,1): grid_space['min_sdev'] = [round(random.random(),6) for r in range(random.randint(2,3))]
if random.randint(0,1): grid_space['eps_sdev'] = [round(random.random(),6) for r in range(random.randint(2,3))]
elif algo == "pca":
if random.randint(0,1): grid_space['max_iterations'] = random.sample(list(range(1,1000)),random.randint(2,3))
if random.randint(0,1): grid_space['transform'] = random.sample(["NONE","STANDARDIZE","NORMALIZE","DEMEAN","DESCALE"], random.randint(2,3))
grid_space['k'] = random.sample(list(range(1,min(ncols,nrows))),random.randint(2,3))
else:
raise ValueError
return grid_space
# Validate given models' parameters against expected values
def expect_model_param(models, attribute_name, expected_values):
    """
    Assert that the distinct actual values of ``attribute_name`` across all models
    in a grid match ``expected_values`` (order-insensitive, floats rounded to 5
    decimal places and compared with a small tolerance).
    :param models: an H2OGridSearch-like object exposing ``.models``
    :param attribute_name: name of the model parameter to inspect
    :param expected_values: a single value or a list of expected values
    """
    print("param: {0}".format(attribute_name))
    # collect the distinct 'actual' parameter values; list-valued params (GLM) use their first element
    actual_values = list(set([m.params[attribute_name]['actual'] \
                                  if type(m.params[attribute_name]['actual']) != list
                              else m.params[attribute_name]['actual'][0] for m in models.models]))
    # possible for actual to be a list (GLM)
    if type(expected_values) != list:
        expected_values = [expected_values]
    # limit precision. Rounding happens in some models like RF
    actual_values = [x if isinstance(x,basestring) else round(float(x),5) for x in actual_values]
    expected_values = [x if isinstance(x,basestring) else round(float(x),5) for x in expected_values]
    print("actual values: {0}".format(actual_values))
    print("expected values: {0}".format(expected_values))
    actual_values_len = len(actual_values)
    expected_values_len = len(expected_values)
    assert actual_values_len == expected_values_len, "Expected values len: {0}. Actual values len: " \
                                                     "{1}".format(expected_values_len, actual_values_len)
    # sort both sides so the comparison is order-insensitive
    actual_values = sorted(actual_values)
    expected_values = sorted(expected_values)
    for i in range(len(actual_values)):
        if isinstance(actual_values[i], float):
            # tolerance slightly above the rounding precision used above
            assert abs(actual_values[i]-expected_values[i]) < 1.1e-5, "Too large of a difference betewen actual and " \
                                                                      "expected value. Actual value: {}. Expected value: {}"\
                .format(actual_values[i], expected_values[i])
        else:
            assert actual_values[i] == expected_values[i], "Expected: {}. Actual: {}"\
                .format(expected_values[i], actual_values[i])
def rest_ctr():
    """Return the number of REST API requests issued over the current h2o connection."""
    return h2o.connection().requests_count
def write_syn_floating_point_dataset_glm(csv_training_data_filename, csv_validation_data_filename,
                                         csv_test_data_filename, csv_weight_name, row_count, col_count, data_type,
                                         max_p_value, min_p_value, max_w_value, min_w_value, noise_std, family_type,
                                         valid_row_count, test_row_count, class_number=2,
                                         class_method=('probability', 'probability', 'probability'),
                                         class_margin=[0.0, 0.0, 0.0]):
    """
    Generate random data sets to test the GLM algo using the following steps:
    1. randomly generate the intercept and weight vector;
    2. generate a set of predictors X;
    3. generate the corresponding response y using the formula: y = w^T x+b+e where T is transpose, e is a random
       Gaussian noise added.  For the Binomial family, the relationship between the response Y and predictor vector X
       is assumed to be Prob(Y = 1|X) = exp(W^T * X + e)/(1+exp(W^T * X + e)).  For the Multinomial family, the
       relationship between the response Y (K possible classes) and predictor vector X is assumed to be
       Prob(Y = c|X) = exp(Wc^T * X + e)/(sum k=0 to K-1 (exp(Wk^T *X+e))
    :param csv_training_data_filename: string representing full path filename to store training data set.  Set to
        null string if no training data set is to be generated.
    :param csv_validation_data_filename: string representing full path filename to store validation data set.  Set to
        null string if no validation data set is to be generated.
    :param csv_test_data_filename: string representing full path filename to store test data set.  Set to null string if
        no test data set is to be generated.
    :param csv_weight_name: string representing full path filename to store intercept and weight used to generate
        all data sets.
    :param row_count: integer representing number of samples (predictor, response) in training data set
    :param col_count: integer representing the number of predictors in the data set
    :param data_type: integer representing the type of predictors or weights (1: integers, 2: real)
    :param max_p_value: integer representing maximum predictor values
    :param min_p_value: integer representing minimum predictor values
    :param max_w_value: integer representing maximum intercept/weight values
    :param min_w_value: integer representing minimum intercept/weight values
    :param noise_std: Gaussian noise standard deviation used to generate noise e to add to response
    :param family_type: string represents the various distribution families (gaussian, multinomial, binomial) supported
        by our GLM algo
    :param valid_row_count: integer representing number of samples (predictor, response) in validation data set
    :param test_row_count: integer representing number of samples (predictor, response) in test data set
    :param class_number: integer, optional, representing number of classes for binomial and multinomial
    :param class_method: string tuple, optional, describing how we derive the final response from the class
        probabilities generated for binomial and multinomial family_type for training/validation/test data set respectively.
        If set to 'probability', response y is generated randomly according to the class probabilities calculated.  If set
        to 'threshold', response y is set to the class with the maximum class probability if the maximum class probability
        exceeds the second highest class probability by the value set in margin.  If the maximum class probability fails
        to be greater by the margin than the second highest class probability, the data sample is discarded.
    :param class_margin: float tuple, optional, denotes the threshold by how much the maximum class probability has to
        exceed the second highest class probability in order for us to keep the data sample for
        training/validation/test data set respectively.  This field is only meaningful if class_method is set to
        'threshold'
    :return: None
    """
    # generate bias b and weight as a column vector
    weights = generate_weights_glm(csv_weight_name, col_count, data_type, min_w_value, max_w_value,
                                   family_type=family_type, class_number=class_number)
    # generate training data set (weightChange=True is applied to the training set only)
    if len(csv_training_data_filename) > 0:
        generate_training_set_glm(csv_training_data_filename, row_count, col_count, min_p_value, max_p_value, data_type,
                                  family_type, noise_std, weights,
                                  class_method=class_method[0], class_margin=class_margin[0], weightChange=True)
    # generate validation data set
    if len(csv_validation_data_filename) > 0:
        generate_training_set_glm(csv_validation_data_filename, valid_row_count, col_count, min_p_value, max_p_value,
                                  data_type, family_type, noise_std, weights,
                                  class_method=class_method[1], class_margin=class_margin[1])
    # generate test data set
    if len(csv_test_data_filename) > 0:
        generate_training_set_glm(csv_test_data_filename, test_row_count, col_count, min_p_value, max_p_value,
                                  data_type, family_type, noise_std, weights,
                                  class_method=class_method[2], class_margin=class_margin[2])
def write_syn_mixed_dataset_glm(csv_training_data_filename, csv_training_data_filename_true_one_hot,
                                csv_validation_data_filename, csv_validation_filename_true_one_hot,
                                csv_test_data_filename, csv_test_filename_true_one_hot, csv_weight_filename, row_count,
                                col_count, max_p_value, min_p_value, max_w_value, min_w_value, noise_std, family_type,
                                valid_row_count, test_row_count, enum_col, enum_level_vec, class_number=2,
                                class_method=['probability', 'probability', 'probability'],
                                class_margin=[0.0, 0.0, 0.0]):
    """
    Generate random GLM data sets whose predictors mix categorical and real columns.

    Like write_syn_floating_point_dataset_glm, the procedure is:
    1. randomly generate the intercept and weight vector;
    2. generate a set of predictors X (here containing both categorical and real columns);
    3. generate the corresponding response y using the formula: y = w^T x+b+e where T is transpose, e is a random
    Gaussian noise added. For the Binomial family, the relationship between the response Y and predictor vector X
    is assumed to be Prob(Y = 1|X) = exp(W^T * X + e)/(1+exp(W^T * X + e)). For the Multinomial family, the
    relationship between the response Y (K possible classes) and predictor vector X is assumed to be
    Prob(Y = c|X) = exp(Wc^T * X + e)/(sum k=0 to K-1 (exp(Wk^T *X+e))

    :param csv_training_data_filename: full path of the training data set file; empty string to skip
    :param csv_training_data_filename_true_one_hot: full path of the training set with true one-hot encoding;
        empty string to skip
    :param csv_validation_data_filename: full path of the validation data set file; empty string to skip
    :param csv_validation_filename_true_one_hot: full path of the validation set with true one-hot encoding;
        empty string to skip
    :param csv_test_data_filename: full path of the test data set file; empty string to skip
    :param csv_test_filename_true_one_hot: full path of the test set with true one-hot encoding;
        empty string to skip
    :param csv_weight_filename: full path of the file storing the intercept and weights used to generate all sets
    :param row_count: integer number of samples in the training data set
    :param col_count: integer number of predictors in the data set
    :param max_p_value: integer maximum predictor value
    :param min_p_value: integer minimum predictor value
    :param max_w_value: integer maximum intercept/weight value
    :param min_w_value: integer minimum intercept/weight value
    :param noise_std: Gaussian noise standard deviation used to generate the noise e added to the response
    :param family_type: string representing the distribution family (gaussian, multinomial, binomial)
    :param valid_row_count: integer number of samples in the validation data set
    :param test_row_count: integer number of samples in the test data set
    :param enum_col: integer number of categorical columns in the data set
    :param enum_level_vec: vector containing the maximum integer value for each categorical column
    :param class_number: integer, optional, number of classes for binomial and multinomial
    :param class_method: string tuple, optional, per data set (training/validation/test) how the final response
        is derived from the class probabilities: 'probability' draws the class randomly according to the
        probabilities; 'threshold' picks the most probable class only when it beats the runner-up by class_margin,
        discarding the sample otherwise
    :param class_margin: float tuple, optional, per data set the margin used when the matching class_method entry
        is 'threshold'
    :return: None
    """
    # a categorical column whose maximum value is m has m+1 levels and therefore expands into
    # m+1 columns under true one-hot encoding
    new_col_count = col_count - enum_col + sum(enum_level_vec) + len(enum_level_vec)
    # weights are sized for true one-hot encoding; the reference + one-hot variant simply skips
    # the few extra weights
    weights = generate_weights_glm(csv_weight_filename, new_col_count, 2, min_w_value, max_w_value,
                                   family_type=family_type, class_number=class_number)
    # (plain filename, true one-hot filename, sample count, class_method/margin index, weightChange)
    data_set_specs = [
        (csv_training_data_filename, csv_training_data_filename_true_one_hot, row_count, 0, True),
        (csv_validation_data_filename, csv_validation_filename_true_one_hot, valid_row_count, 1, False),
        (csv_test_data_filename, csv_test_filename_true_one_hot, test_row_count, 2, False)]
    for plain_name, one_hot_name, sample_cnt, set_idx, change_weight in data_set_specs:
        if len(plain_name) > 0:
            generate_training_set_mixed_glm(plain_name, one_hot_name, sample_cnt, col_count, min_p_value,
                                            max_p_value, family_type, noise_std, weights, enum_col,
                                            enum_level_vec, class_number=class_number,
                                            class_method=class_method[set_idx],
                                            class_margin=class_margin[set_idx], weightChange=change_weight)
def generate_weights_glm(csv_weight_filename, col_count, data_type, min_w_value, max_w_value, family_type='gaussian',
                         class_number=2):
    """
    Generate random intercept and weight vectors (integer or real) for the GLM algo and save
    the values (transposed) in a file specified by csv_weight_filename.

    :param csv_weight_filename: string representing full path filename to store intercept and weight used to generate
     all data set
    :param col_count: integer representing the number of predictors in the data set
    :param data_type: integer representing the type of weights (1: integers, 2: real)
    :param min_w_value: integer representing minimum intercept/weight values
    :param max_w_value: integer representing maximum intercept/weight values
    :param family_type: string, optional, represents the various distribution families (gaussian, multinomial,
        binomial, ordinal) supported by our GLM algo
    :param class_number: integer, optional, representing number classes for binomial and multinomial

    :return: column vector of size 1+col_count representing intercept (row 0) and weights, or a matrix of size
        1+col_count by class_number for the multi-class families
    """
    if 'gaussian' in family_type.lower():
        if data_type == 1:      # generate random integer intercept/weight
            # np.random.randint's upper bound is exclusive, hence the +1 to keep max_w_value inclusive
            # (replaces np.random.random_integers, which was deprecated and removed from numpy)
            weight = np.random.randint(min_w_value, max_w_value + 1, [col_count+1, 1])
        elif data_type == 2:    # generate real intercept/weights
            weight = np.random.uniform(min_w_value, max_w_value, [col_count+1, 1])
        else:
            assert False, "dataType must be 1 or 2 for now."
    elif ('binomial' in family_type.lower()) or ('multinomial' in family_type.lower()) \
            or ('ordinal' in family_type.lower()):
        if 'binomial' in family_type.lower():   # for binomial, only need 1 set of weight
            class_number -= 1
        if class_number <= 0:
            assert False, "class_number must be >= 2!"
        # col_count may arrive as a 1-element numpy array; unwrap it before sizing the weights
        if isinstance(col_count, np.ndarray):
            temp_col_count = col_count[0]
        else:
            temp_col_count = col_count
        if data_type == 1:      # generate random integer intercept/weight
            weight = np.random.randint(min_w_value, max_w_value + 1, [temp_col_count+1, class_number])
        elif data_type == 2:    # generate real intercept/weights
            weight = np.random.uniform(min_w_value, max_w_value, [temp_col_count+1, class_number])
        else:
            assert False, "dataType must be 1 or 2 for now."
        # special treatment for ordinal weights: zero the intercepts (generate_response_glm may turn
        # them into class thresholds later) and share one beta vector across all classes
        if 'ordinal' in family_type.lower():
            num_pred = len(weight)
            for index in range(class_number):
                weight[0, index] = 0
                for indP in range(1, num_pred):
                    weight[indP, index] = weight[indP, 0]   # make sure betas for all classes are the same
    np.savetxt(csv_weight_filename, weight.transpose(), delimiter=",")
    return weight
def generate_training_set_glm(csv_filename, row_count, col_count, min_p_value, max_p_value, data_type, family_type,
                              noise_std, weight, class_method='probability', class_margin=0.0, weightChange=False):
    """
    Generate a supervised data set given weights for the GLM algo. First randomly generate the predictors, then
    call function generate_response_glm to generate the corresponding response y using the formula: y = w^T x+b+e
    where T is transpose, e is a random Gaussian noise added. For the Binomial family, the relationship between
    the response Y and predictor vector X is assumed to be Prob(Y = 1|X) = exp(W^T * X + e)/(1+exp(W^T * X + e)).
    For the Multinomial family, the relationship between the response Y (K possible classes) and predictor vector
    X is assumed to be Prob(Y = c|X) = exp(Wc^T * X + e)/(sum k=0 to K-1 (exp(Wk^T *X+e)). The predictors and
    responses are saved in a file specified by csv_filename.

    :param csv_filename: string representing full path filename to store supervised data set
    :param row_count: integer representing the number of training samples in the data set
    :param col_count: integer representing the number of predictors in the data set
    :param min_p_value: integer representing minimum predictor values
    :param max_p_value: integer representing maximum predictor values
    :param data_type: integer representing the type of predictors (1: integers, 2: real)
    :param family_type: string represents the various distribution families (gaussian, multinomial, binomial)
     supported by our GLM algo
    :param noise_std: Gaussian noise standard deviation used to generate noise e to add to response
    :param weight: vector representing w in our formula to generate the response
    :param class_method: string, optional, describing how the final response is derived from the class
        probabilities for binomial/multinomial families: 'probability' draws randomly according to the class
        probabilities; 'threshold' picks the most probable class only when it beats the runner-up by class_margin
    :param class_margin: float, optional, margin used when class_method is 'threshold'
    :param weightChange: bool, optional, forwarded to generate_response_glm (ordinal family only)
    :return: None

    NOTE: samples rejected by the 'threshold' method (marked with a negative response) are NOT removed here;
    callers that need them removed should use remove_negative_response.
    """
    if data_type == 1:      # generate random integers
        # np.random.randint's upper bound is exclusive, hence the +1 to keep max_p_value inclusive
        # (replaces np.random.random_integers, which was deprecated and removed from numpy)
        x_mat = np.random.randint(min_p_value, max_p_value + 1, [row_count, col_count])
    elif data_type == 2:    # generate random real numbers
        x_mat = np.random.uniform(min_p_value, max_p_value, [row_count, col_count])
    else:
        assert False, "dataType must be 1 or 2 for now. "
    # generate the response vector to the input predictors
    response_y = generate_response_glm(weight, x_mat, noise_std, family_type,
                                       class_method=class_method, class_margin=class_margin,
                                       weightChange=weightChange)
    # write to file in csv format
    np.savetxt(csv_filename, np.concatenate((x_mat, response_y), axis=1), delimiter=",")
def generate_clusters(cluster_center_list, cluster_pt_number_list, cluster_radius_list):
    """
    Generate clusters of points around the cluster centers listed in cluster_center_list.
    The number of points per cluster is specified by cluster_pt_number_list and the radius of
    each cluster by cluster_radius_list; the clusters may all differ in size.

    :param cluster_center_list: list of coordinates of cluster centers
    :param cluster_pt_number_list: number of points to generate for each cluster center
    :param cluster_radius_list: list of size (radius) of each cluster
    :return: 2-D numpy array of sample points from all clusters, with the rows shuffled so the
        clusters are mixed together
    """
    k = len(cluster_pt_number_list)     # number of clusters to generate clusters for
    if (not(k == len(cluster_center_list))) or (not(k == len(cluster_radius_list))):
        assert False, "Length of list cluster_center_list, cluster_pt_number_list, cluster_radius_list must be the same!"

    training_sets = []
    for k_ind in range(k):
        new_cluster_data = generate_one_cluster(cluster_center_list[k_ind], cluster_pt_number_list[k_ind],
                                                cluster_radius_list[k_ind])
        if k_ind > 0:
            training_sets = np.concatenate((training_sets, new_cluster_data), axis=0)
        else:
            training_sets = new_cluster_data

    # shuffle the sample rows so that the clusters are all mixed up.
    # BUG FIX: this used to be `map(np.random.shuffle, training_sets)`, which is a no-op on
    # Python 3 (map is lazy and never consumed) and, even when consumed, would shuffle the
    # coordinates *within* each point rather than the order of the samples.
    np.random.shuffle(training_sets)
    return training_sets
def generate_one_cluster(cluster_center, cluster_number, cluster_size):
    """
    Generate one full cluster with cluster_number points centered on cluster_center
    with maximum radius cluster_size.

    :param cluster_center: python list denoting coordinates of cluster center
    :param cluster_number: integer denoting number of points to generate for this cluster
    :param cluster_size: float denoting radius of cluster
    :return: numpy array of shape (cluster_number, len(cluster_center)) denoting a cluster
    """
    pt_dists = np.random.uniform(0, cluster_size, [cluster_number, 1])
    coord_pts = len(cluster_center)     # dimension of each cluster point
    # np.float was removed from numpy (1.24+); the builtin float is the documented replacement
    one_cluster_data = np.zeros((cluster_number, coord_pts), dtype=float)
    for p_ind in range(cluster_number):
        coord_indices = list(range(coord_pts))
        random.shuffle(coord_indices)   # randomly determine which coordinate to generate first
        # extract the scalar explicitly: implicit size-1 array -> scalar conversion is deprecated in numpy
        left_radius = float(pt_dists[p_ind, 0])
        for c_ind in range(coord_pts):
            coord_index = coord_indices[c_ind]
            one_cluster_data[p_ind, coord_index] = random.uniform(-1*left_radius+cluster_center[coord_index],
                                                                  left_radius+cluster_center[coord_index])
            # shrink the remaining radius budget so the finished point stays within cluster_size
            # of the center; max(0.0, ...) guards against a tiny negative value from float rounding
            left_radius = math.sqrt(max(0.0, pow(left_radius, 2)-pow((one_cluster_data[p_ind, coord_index]-
                                                                      cluster_center[coord_index]), 2)))
    return one_cluster_data
def remove_negative_response(x_mat, response_y):
    """
    Drop data samples whose response was marked as invalid (negative).

    When a multinomial/binomial data set is generated with the 'threshold' method, samples whose
    top class probability does not beat the runner-up by the margin are kept but flagged with a
    response of -1. This function removes those flagged samples (both predictors and responses).

    :param x_mat: predictor matrix containing all predictor values
    :param response_y: response vector; negative entries mark samples to be removed
    :return: tuple (x_mat, response_y) with the flagged samples removed
    """
    # rows whose response is negative must be dropped from the predictors as well
    negative_rows = np.where(response_y < 0)[0]
    x_mat = np.delete(x_mat, negative_rows, axis=0)
    # keep only the non-negative responses
    filtered_y = response_y[response_y >= 0]
    return x_mat, filtered_y.transpose()
def generate_training_set_mixed_glm(csv_filename, csv_filename_true_one_hot, row_count, col_count, min_p_value,
                                    max_p_value, family_type, noise_std, weight, enum_col, enum_level_vec,
                                    class_number=2, class_method='probability', class_margin=0.0, weightChange=False):
    """
    Generate a supervised data set given weights for the GLM algo with mixed categorical and real value
    predictors. The categorical and real predictor columns are generated randomly here; the corresponding
    response is produced (and the data saved) by generate_and_save_mixed_glm, once with true one-hot
    encoding and once with reference + one-hot encoding.

    :param csv_filename: string representing full path filename to store supervised data set
    :param csv_filename_true_one_hot: string representing full path filename to store data set with true one-hot
     encoding
    :param row_count: integer representing the number of training samples in the data set
    :param col_count: integer representing the number of predictors in the data set
    :param min_p_value: integer representing minimum predictor values
    :param max_p_value: integer representing maximum predictor values
    :param family_type: string represents the various distribution families (gaussian, multinomial, binomial)
     supported by our GLM algo
    :param noise_std: Gaussian noise standard deviation used to generate noise e to add to response
    :param weight: vector representing w in our formula to generate the response
    :param enum_col: integer representing actual number of categorical columns in data set
    :param enum_level_vec: vector containing maximum integer value for each categorical column
    :param class_number: integer, optional, representing number classes for binomial and multinomial
    :param class_method: string, optional, 'probability' or 'threshold' — how the discrete response is derived
     from the class probabilities for binomial/multinomial families
    :param class_margin: float, optional, margin used when class_method is 'threshold'
    :param weightChange: bool, optional, forwarded to generate_response_glm (applied to the true one-hot pass only)
    :return: None
    """
    # generate the categorical predictor columns
    # (np.int was removed from numpy 1.24+; the builtin int is the documented replacement)
    enum_dataset = np.zeros((row_count, enum_col), dtype=int)
    for indc in range(enum_col):
        # np.random.randint's upper bound is exclusive, hence the +1 to keep enum_level_vec[indc] inclusive
        # (replaces np.random.random_integers, which was deprecated and removed from numpy)
        enum_dataset[:, indc] = np.random.randint(0, enum_level_vec[indc] + 1, row_count)
    # generate the real predictor columns
    x_mat = np.random.uniform(min_p_value, max_p_value, [row_count, col_count-enum_col])
    x_mat = np.concatenate((enum_dataset, x_mat), axis=1)   # categorical columns first, then real columns
    if len(csv_filename_true_one_hot) > 0:
        generate_and_save_mixed_glm(csv_filename_true_one_hot, x_mat, enum_level_vec, enum_col, True, weight, noise_std,
                                    family_type, class_method=class_method, class_margin=class_margin,
                                    weightChange=weightChange)
    if len(csv_filename) > 0:
        generate_and_save_mixed_glm(csv_filename, x_mat, enum_level_vec, enum_col, False, weight, noise_std,
                                    family_type, class_method=class_method, class_margin=class_margin,
                                    weightChange=False)
def generate_and_save_mixed_glm(csv_filename, x_mat, enum_level_vec, enum_col, true_one_hot, weight, noise_std,
                                family_type, class_method='probability', class_margin=0.0, weightChange=False):
    """
    Produce a response vector for the mixed (categorical + real) predictor matrix x_mat and save
    the predictors plus response to csv_filename in csv format.

    The categorical columns are first encoded (true one-hot, or reference level + one-hot) purely
    to compute the response via generate_response_glm; the file itself stores the original
    un-encoded predictors together with the response.

    :param csv_filename: string representing full path filename to store the supervised data set
    :param x_mat: predictor matrix with mixed columns (categorical/real values)
    :param enum_level_vec: vector containing maximum integer value for each categorical column
    :param enum_col: integer representing actual number of categorical columns in data set
    :param true_one_hot: bool, True for true one-hot encoding, False for reference level + one-hot
    :param weight: vector representing w in our formula to generate the response
    :param noise_std: Gaussian noise standard deviation used to generate noise e to add to response
    :param family_type: string representing the distribution family (gaussian, multinomial, binomial)
    :param class_method: string, optional, 'probability' or 'threshold' — how the discrete response is
        derived from the class probabilities for binomial/multinomial families
    :param class_margin: float, optional, margin used when class_method is 'threshold'
    :param weightChange: bool, optional, forwarded to generate_response_glm
    :return: None
    """
    encoded_predictors = encode_enum_dataset(x_mat, enum_level_vec, enum_col, true_one_hot, False)
    # reference + one-hot encoding produces fewer columns than true one-hot, so trim the weight
    # vector to match (+1 accounts for the intercept term)
    if not true_one_hot:
        weight = weight[0:encoded_predictors.shape[1] + 1]
    # generate the corresponding response vector given the weights and encoded predictors
    response_y = generate_response_glm(weight, encoded_predictors, noise_std, family_type,
                                       class_method=class_method, class_margin=class_margin,
                                       weightChange=weightChange)
    # the 'threshold' method marks rejected multinomial/binomial samples with a negative
    # response; drop those samples before saving
    family = family_type.lower()
    if (('multinomial' in family) or ('binomial' in family)) and ('threshold' in class_method.lower()):
        (x_mat, response_y) = remove_negative_response(x_mat, response_y)
    # write generated data set to file in csv format
    np.savetxt(csv_filename, np.concatenate((x_mat, response_y), axis=1), delimiter=",")
def encode_enum_dataset(dataset, enum_level_vec, enum_col, true_one_hot, include_nans):
    """
    Encode the categorical columns of a mixed (categorical + real) predictor matrix.

    The first enum_col columns of dataset are replaced by their one-hot encoding (true one-hot)
    or by a reference-level + one-hot encoding; the remaining real columns pass through unchanged.

    :param dataset: 2-d numpy array of predictors whose first enum_col columns are categorical
    :param enum_level_vec: vector containing maximum level for each categorical column
    :param enum_col: number of categorical columns in the data set
    :param true_one_hot: bool indicating if we are using true one hot encoding or one reference level + one hot
     encoding
    :param include_nans: bool indicating if nans may appear in the categorical columns; an extra
     encoding column is added for columns that actually contain nans
    :return: data set with categorical columns encoded with 1-hot encoding or 1-hot encoding plus reference
    """
    (num_row, num_col) = dataset.shape
    # split the data set into categorical and real parts
    enum_arrays = dataset[:, 0:enum_col]
    new_enum_arrays = []
    # perform the encoding for each categorical column
    for indc in range(enum_col):
        enum_col_num = enum_level_vec[indc]+1   # number of levels, hence encoded columns
        if not true_one_hot:
            enum_col_num -= 1   # reference + one-hot uses one fewer encoded column
        # BUG FIX: the nan test used to be np.any(enum_arrays[:, indc]), which is true for any
        # non-zero value; it must detect actual nans (cast to float so isnan works on int data too)
        if include_nans and np.any(np.isnan(np.asarray(enum_arrays[:, indc], dtype=float))):
            enum_col_num += 1
        new_temp_enum = np.zeros((num_row, enum_col_num))
        one_hot_matrix = one_hot_encoding(enum_col_num)
        last_col_index = enum_col_num-1
        # encode each enum using 1-hot encoding or plus reference value
        for indr in range(num_row):
            enum_val = enum_arrays[indr, indc]
            if true_one_hot:    # true one-hot encoding: every level gets its own bit
                new_temp_enum[indr, :] = replace_with_encoded_bits(one_hot_matrix, enum_val, 0, last_col_index)
            else:
                # level 0 is the reference level and stays encoded as all zeros
                if enum_val:
                    new_temp_enum[indr, :] = replace_with_encoded_bits(one_hot_matrix, enum_val, 1, last_col_index)
        if indc == 0:
            new_enum_arrays = new_temp_enum
        else:
            new_enum_arrays = np.concatenate((new_enum_arrays, new_temp_enum), axis=1)
    return np.concatenate((new_enum_arrays, dataset[:, enum_col:num_col]), axis=1)
def replace_with_encoded_bits(one_hot_matrix, enum_val, add_value, last_col_index):
    """
    Look up the one-hot encoded bits for a single categorical value.

    :param one_hot_matrix: encoding table mapping a categorical value to its 1-hot bits
    :param enum_val: categorical data value, could be np.nan
    :param add_value: set to 1 when a reference level is used in addition to 1-hot encoding
    :param last_col_index: row index holding the encoding reserved for np.nan
    :return: vector representing the encoded bits for the enum value
    """
    # nan maps to the dedicated last row of the encoding table
    if np.isnan(enum_val):
        return one_hot_matrix[last_col_index]
    # shift by add_value when a reference level consumes the first encoding slot
    return one_hot_matrix[int(enum_val - add_value)]
def one_hot_encoding(enum_level):
    """
    Build the one-hot encoding lookup matrix for enum_level levels.

    Row i of the result is the bit pattern for level i; the construction grows the 2-level base
    case one level at a time by prepending a zero column and appending a new row.

    :param enum_level: number of levels to encode; must be >= 2
    :return: numpy array (matrix for enum_level > 2) holding one encoding per row
    """
    if enum_level < 2:
        assert False, "enum_level must be >= 2."
    encoding = np.array([[0, 1], [1, 0]])           # base case: 2 enum levels
    for level in range(3, enum_level+1):            # grow the encoding one level at a time
        (rows, cols) = encoding.shape
        zero_col = np.asmatrix(np.zeros(rows)).transpose()
        encoding = np.concatenate((zero_col, encoding), axis=1)     # prepend a column of zeros
        new_row = np.asmatrix(np.zeros(rows+1))     # new row for the added level
        new_row[0, 0] = 1                           # its bit sits in the first column
        encoding = np.concatenate((encoding, new_row), axis=0)
    return encoding
def generate_response_glm(weight, x_mat, noise_std, family_type, class_method='probability',
                          class_margin=0.0, weightChange=False, even_distribution=True):
    """
    Generate the response vector for a GLM data set given the weights and predictor matrix.

    y = [1 X] * W + e with e ~ N(0, noise_std). For 'gaussian' the linear response is returned
    directly. For 'ordinal' the per-class linear predictors are thresholded at 0 to produce a
    class label. For 'binomial'/'multinomial' the linear predictors are exponentiated into
    (unnormalized) class probabilities and derive_discrete_response picks the label.

    :param weight: numpy matrix of size (col_count+1) x K whose first row holds the intercepts.
        WARNING: for the ordinal family with weightChange=True the intercept row is MUTATED in
        place to hold the recomputed class thresholds.
    :param x_mat: random numpy matrix (2-D ndarray) containing the predictors, one sample per row
    :param noise_std: Gaussian noise standard deviation used to generate noise e to add to response
    :param family_type: string representing the distribution family (gaussian, multinomial,
        binomial, ordinal) supported by our GLM algo
    :param class_method: string, optional, 'probability' or 'threshold'; forwarded to
        derive_discrete_response for binomial/multinomial families
    :param class_margin: float, optional, margin for the 'threshold' method; forwarded to
        derive_discrete_response
    :param class_margin: float, optional
    :param weightChange: bool, optional, ordinal only: recompute the intercepts so the class
        thresholds actually split the generated samples
    :param even_distribution: bool, optional, ordinal only: if True the recomputed thresholds
        split the samples evenly between classes, otherwise the split points are randomized
    :return: vector representing the response (continuous for gaussian, class labels otherwise)
    """
    (num_row, num_col) = x_mat.shape
    # prepend a column of ones so the intercept row of weight takes part in the product
    temp_ones_col = np.asmatrix(np.ones(num_row)).transpose()
    x_mat = np.concatenate((temp_ones_col, x_mat), axis=1)
    response_y = x_mat * weight + noise_std * np.random.standard_normal([num_row, 1])

    if 'ordinal' in family_type.lower():
        (num_sample, num_class) = response_y.shape
        lastClass = num_class - 1
        if weightChange:
            tresp = []
            # collect and sort the negated class-0 linear predictors as threshold candidates
            for indP in range(num_sample):
                tresp.append(-response_y[indP, 0])
            tresp.sort()
            num_per_class = int(len(tresp)/num_class)
            if (even_distribution):
                # place each threshold so every class receives num_per_class samples
                for indC in range(lastClass):
                    weight[0, indC] = tresp[(indC+1)*num_per_class]
            else:   # do not generate evenly distributed class, generate randomly distributed classes
                splitInd = []
                lowV = 0.1
                highV = 1
                v1 = 0
                acc = 0
                for indC in range(lastClass):
                    tempf = random.uniform(lowV, highV)
                    splitInd.append(v1+int(tempf*num_per_class))
                    v1 = splitInd[indC]     # next split starts after the previous one
                    acc += 1-tempf          # carry the unused fraction over to later classes
                    highV = 1+acc
                for indC in range(lastClass):   # install the thresholds as intercepts
                    weight[0, indC] = tresp[splitInd[indC]]
            # recompute the linear response with the new thresholds in place
            response_y = x_mat * weight + noise_std * np.random.standard_normal([num_row, 1])

        # np.int was removed from numpy 1.24+; the builtin int is the documented replacement
        discrete_y = np.zeros((num_sample, 1), dtype=int)
        for indR in range(num_sample):
            discrete_y[indR, 0] = lastClass     # default: last class if no threshold is crossed
            for indC in range(lastClass):
                if (response_y[indR, indC] >= 0):
                    discrete_y[indR, 0] = indC
                    break
        return discrete_y

    # binomial/multinomial: turn linear predictors into (unnormalized) class probabilities
    if ('multinomial' in family_type.lower()) or ('binomial' in family_type.lower()):
        temp_mat = np.exp(response_y)   # matrix of n by K where K = 1 for binomials
        if 'binomial' in family_type.lower():
            ntemp_mat = temp_mat + 1
            btemp_mat = temp_mat / ntemp_mat    # sigmoid of the linear predictor
            temp_mat = np.concatenate((1-btemp_mat, btemp_mat), axis=1)     # inflate temp_mat to 2 classes
        response_y = derive_discrete_response(temp_mat, class_method, class_margin, family_type)
    return response_y
def derive_discrete_response(prob_mat, class_method, class_margin, family_type='binomial'):
    """
    Turn a matrix of class probabilities Prob(y=k) into one class label per sample.

    NOTE: the current implementation simply picks the class with the highest probability
    (argmax). The class_method/class_margin machinery described by callers ('probability'
    sampling, or 'threshold' with rejected samples marked -1) is not consulted here; the
    parameters are accepted for interface compatibility.

    :param prob_mat: probability matrix, one row per sample and one column per class
    :param class_method: string, 'probability' or 'threshold' (currently unused)
    :param class_margin: margin for the 'threshold' method (currently unused)
    :param family_type: string, distribution family (currently unused)
    :return: vector holding the winning class index of each sample
    """
    (sample_cnt, class_cnt) = prob_mat.shape
    winning_class = np.argmax(prob_mat, axis=1)
    return winning_class
def normalize_matrix(mat):
    """
    Scale each row of mat so that the row sums to one.

    :param mat: matrix holding prob(y=k) values, one row per sample
    :return: matrix of the same shape whose rows each sum to 1
    """
    (_, num_class) = mat.shape
    row_totals = np.sum(mat, axis=1)
    # replicate each row total across its row so we can divide elementwise
    denom = np.kron(row_totals, np.ones((1, num_class), dtype=float))
    return mat / denom
def move_files(dir_path, old_name, new_file, action='move'):
    """
    Move or copy a data set (old_name) into directory dir_path under the new
    name new_file so that we can later re-run a test on the exact data set
    that exposed a problem.  Nothing happens if old_name does not exist.

    :param dir_path: string, full path of the directory the file goes into
    :param new_file: string, file name (without directory) to use in dir_path
    :param old_name: string, full path of the file to be moved/copied
    :param action: string, optional, either 'move' or 'copy'
    :return: None
    """
    new_name = os.path.join(dir_path, new_file)  # generate new filename including directory path
    if os.path.isfile(old_name):  # only move/copy file old_name if it actually exists
        # use shutil instead of shelling out "mv"/"cp": handles paths containing
        # spaces or shell metacharacters safely and is portable across platforms
        if 'move' in action:
            shutil.move(old_name, new_name)
        elif 'copy' in action:
            shutil.copy(old_name, new_name)
        else:
            assert False, "Illegal action setting. It can only be 'move' or 'copy'!"
def remove_files(filename):
    """
    Delete the data set stored in filename once a dynamic test has completed
    with no error.  Some data sets we use can be rather big; this is done to
    save space.  A missing file is silently ignored, matching the old
    best-effort "rm" behaviour.

    :param filename: string representing the file to be removed. Full path is included.
    :return: None
    """
    # os.remove is portable and avoids spawning a shell; the previous
    # "rm " + filename string broke on paths containing spaces
    if os.path.isfile(filename):
        os.remove(filename)
def random_col_duplication(num_cols, duplication_threshold, max_number, to_scale, max_scale_factor):
    """
    Randomly decide, for every predictor column, whether it should be
    duplicated and, if so, how many copies to make.  When scaling is enabled,
    a random multiplier is drawn for every duplicated copy.  Finally, the
    combined column order (with its matching scale factors) is shuffled.

    :param num_cols: integer representing number of predictors used
    :param duplication_threshold: a column is duplicated when a uniform draw
        exceeds this value; set it low to encourage duplication and vice versa
    :param max_number: maximum number of times a column is to be duplicated
    :param to_scale: bool indicating if a duplicated column is to be scaled
    :param max_scale_factor: real representing maximum scale value for repeated columns
    :return: tuple (col_return, col_scale_return).
        col_return: for each column of the new data matrix, the index of the
        original column it copies.
        col_scale_return: the scale factor to apply to each new column.
    """
    chosen_cols = list(range(num_cols))   # original predictor indices
    chosen_scales = [1] * num_cols        # original columns keep a scale of one
    for col in range(num_cols):
        # one uniform draw per column decides whether to duplicate it
        if random.uniform(0, 1) > duplication_threshold:
            copies = random.randint(1, max_number)  # how many extra copies
            chosen_cols.extend([col] * copies)
            if to_scale:
                # one random scale factor per duplicated copy
                chosen_scales.extend(random.uniform(0, max_scale_factor) for _ in range(copies))
            else:
                chosen_scales.extend(1 for _ in range(copies))
    # shuffle the column order together with the matching scale factors
    order = list(range(len(chosen_cols)))
    random.shuffle(order)
    col_return = [chosen_cols[i] for i in order]
    col_scale_return = [chosen_scales[i] for i in order]
    return col_return, col_scale_return
def duplicate_scale_cols(col_indices, col_scale, old_filename, new_filename):
    """
    Perform column duplication with scaling, given the column indices and the
    scaling factor for each output column.  The original data set is loaded
    from old_filename; after duplicating and scaling, the new data set is
    written to new_filename.

    :param col_indices: vector indicating the column indices of the original data matrix that will be included
    in the new data matrix with duplicated columns
    :param col_scale: vector indicating for each new column in the new data matrix with duplicated columns,
    what scale should be applied to that column
    :param old_filename: string representing full directory path and filename where data set is stored
    :param new_filename: string representing full directory path and filename where new data set is to be stored
    :return: None
    """
    np_frame = np.asmatrix(np.genfromtxt(old_filename, delimiter=',', dtype=None))
    (num_row, num_col) = np_frame.shape
    # builtin float here: the np.float alias was removed from recent numpy releases
    np_frame_new = np.asmatrix(np.zeros((num_row, len(col_indices)), dtype=float))
    for ind in range(len(col_indices)):
        # copy the selected source column and apply its scale factor
        np_frame_new[:, ind] = np_frame[:, col_indices[ind]] * col_scale[ind]
    # done changing the data frame. Save it in a new file
    np.savetxt(new_filename, np_frame_new, delimiter=",")
def insert_nan_in_data(old_filename, new_filename, missing_fraction):
    """
    Given the filename of a data set stored in old_filename, randomly replace
    each predictor value with nan with probability missing_fraction.  The last
    column (the response) is left untouched.  The new data set is stored in
    new_filename.

    :param old_filename: string representing full directory path and filename where data set is stored
    :param new_filename: string representing full directory path and filename where new data set with missing
    values is to be stored
    :param missing_fraction: real value representing the probability of replacing a predictor with nan.
    :return: None
    """
    np_frame = np.asmatrix(np.genfromtxt(old_filename, delimiter=',', dtype=None))
    (row_count, col_count) = np_frame.shape
    # one uniform draw per predictor entry; col_count-1 excludes the response column
    random_matrix = np.random.uniform(0, 1, [row_count, col_count-1])
    for indr in range(row_count):  # for each predictor value, determine if to replace value with nan
        for indc in range(col_count-1):
            if random_matrix[indr, indc] < missing_fraction:
                np_frame[indr, indc] = np.nan
    # save new data set with missing values to new file
    np.savetxt(new_filename, np_frame, delimiter=",")
def print_message_values(start_string, nump_array):
    """
    Print a descriptive message followed by the contents of an array.

    :param start_string: string, message printed before the array
    :param nump_array: array whose contents are printed after the message
    :return: None
    """
    for item in (start_string, nump_array):
        print(item)
def show_test_results(test_name, curr_test_val, new_test_val):
    """
    Report on screen whether a test passed or failed.  The test failed exactly
    when the failure counter grew while the test ran.

    :param test_name: string representing test name
    :param curr_test_val: integer, number of failed tests before this test ran
    :param new_test_val: integer, number of failed tests after this test ran
    :return: integer: 0 if test passed and 1 if test failed.
    """
    test_failed = curr_test_val < new_test_val
    if test_failed:
        print("Ooops, " + test_name + " failed. I am sorry...")
    else:
        print("Yeah, " + test_name + " passed!")
    return 1 if test_failed else 0
def assert_H2OTwoDimTable_equal_upto(table1, table2, col_header_list, tolerance=1e-6):
    '''
    This method will compare two H2OTwoDimTables that are almost of the same size. table1 can be shorter
    than table2. However, for whatever part of table2 table1 has, they must be the same.

    NOTE(review): this function is re-defined later in this module with extra
    NaN handling; that later definition shadows this one at import time, so
    this version is effectively dead code -- consider removing one of the two.

    :param table1: H2OTwoDimTable, the (possibly shorter) reference table
    :param table2: H2OTwoDimTable compared against table1
    :param col_header_list: list of column-header names whose cells are compared
    :param tolerance: maximum absolute difference allowed between float cells
    :return: None; raises AssertionError when any compared cell pair differs
    '''
    size1 = len(table1.cell_values)
    for cname in col_header_list:
        colindex = table1.col_header.index(cname)  # column position looked up by name
        for cellind in range(size1):
            val1 = table1.cell_values[cellind][colindex]
            val2 = table2.cell_values[cellind][colindex]
            if isinstance(val1, float) and isinstance(val2, float):
                assert abs(val1-val2) < tolerance, \
                    "table 1 value {0} and table 2 value {1} in {2} differ more than tolerance of " \
                    "{3}".format(val1, val2, cname, tolerance)
            else:
                assert val1==val2, "table 1 value {0} and table 2 value {1} in {2} differ more than tolerance of " \
                                   "{3}".format(val1, val2, cname, tolerance)
    print("******* Congrats! Test passed. ")
def extract_col_value_H2OTwoDimTable(table, col_name):
    '''
    Collect into a list all values stored under column col_name of an
    H2OTwoDimTable, in row order.

    :param table: H2OTwoDimTable to read from
    :param col_name: string, name of the column to extract
    :return: list of the values found in that column
    '''
    col_position = table.col_header.index(col_name)
    return [row[col_position] for row in table.cell_values]
def assert_H2OTwoDimTable_equal_upto(table1, table2, col_header_list, tolerance=1e-6):
    '''
    Compare two H2OTwoDimTables that are almost of the same size.  table1 may
    be shorter than table2, but every cell table1 does have must match the
    corresponding cell of table2.  Two float cells agree when their absolute
    difference is below tolerance, or when both are NaN; a cell pair where at
    least one side is not a float must compare equal.

    :param table1: H2OTwoDimTable, the (possibly shorter) reference table
    :param table2: H2OTwoDimTable compared against table1
    :param col_header_list: list of column-header names whose cells are compared
    :param tolerance: maximum absolute difference allowed between float cells
    :return: None; raises AssertionError when any compared cell pair differs
    '''
    num_rows = len(table1.cell_values)
    for cname in col_header_list:
        col_pos = table1.col_header.index(cname)
        for row in range(num_rows):
            cell1 = table1.cell_values[row][col_pos]
            cell2 = table2.cell_values[row][col_pos]
            both_floats = isinstance(cell1, float) and isinstance(cell2, float)
            if both_floats and not (math.isnan(cell1) and math.isnan(cell2)):
                assert abs(cell1-cell2) < tolerance, \
                    "table 1 value {0} and table 2 value {1} in {2} differ more than tolerance of " \
                    "{3}".format(cell1, cell2, cname, tolerance)
            elif not both_floats:
                assert cell1==cell2, "table 1 value {0} and table 2 value {1} in {2} differ more than tolerance of " \
                                     "{3}".format(cell1, cell2, cname, tolerance)
    print("******* Congrats! Test passed. ")
def assert_equal_scoring_history(model1, model2, col_compare_list, tolerance=1e-6):
    """
    Verify that the scoring histories of two models agree (within tolerance)
    on the columns named in col_compare_list.

    :param model1: first H2O model whose scoring history is compared
    :param model2: second H2O model whose scoring history is compared
    :param col_compare_list: list of scoring-history column names to compare
    :param tolerance: maximum absolute difference allowed between float cells
    :return: None; raises AssertionError when the histories disagree
    """
    history1 = model1._model_json["output"]["scoring_history"]
    history2 = model2._model_json["output"]["scoring_history"]
    assert_H2OTwoDimTable_equal_upto(history1, history2, col_compare_list, tolerance=tolerance)
def assert_H2OTwoDimTable_equal(table1, table2, col_header_list, tolerance=1e-6, check_sign=False, check_all=True,
                                num_per_dim=10):
    """
    This method compares two H2OTwoDimTables and verify that their difference is less than value set in tolerance. It
    is probably an overkill for I have assumed that the order of col_header_list may not be in the same order as
    the values in the table.cell_values[ind][0]. In addition, I do not assume an order for the names in the
    table.cell_values[ind][0] either for there is no reason for an order to exist.
    To limit the test run time, we can test a randomly sampled of points instead of all points

    :param table1: H2OTwoDimTable to be compared
    :param table2: the other H2OTwoDimTable to be compared
    :param col_header_list: list of strings denote names that we want the comparison to be performed
    :param tolerance: default to 1e-6
    :param check_sign: bool, determine if the sign of values are important or not. For eigenvectors, they are not.
    :param check_all: bool, determine if we need to compare every single element
    :param num_per_dim: integer, number of elements to sample per dimension. We have 3 here.
    :return: None if comparison succeed and raise an error if comparison failed for whatever reason
    """
    num_comparison = len(set(col_header_list))
    size1 = len(table1.cell_values)
    size2 = len(table2.cell_values)
    worst_error = 0
    assert size1==size2, "The two H2OTwoDimTables are of different size!"
    assert num_comparison<=size1, "H2OTwoDimTable do not have all the attributes specified in col_header_list."
    # eigenvector columns are only defined up to sign, so when check_sign is set a per-column
    # +1/-1 multiplier is derived first and applied to table2 values before comparing
    flip_sign_vec = generate_sign_vec(table1, table2) if check_sign else [1]*len(table1.cell_values[0]) # correct for sign change for eigenvector comparisons
    # when check_all is False these are random samples of row/column indices, to bound run time
    randRange1 = generate_for_indices(len(table1.cell_values), check_all, num_per_dim, 0)
    randRange2 = generate_for_indices(len(table2.cell_values), check_all, num_per_dim, 0)
    for ind in range(num_comparison):
        col_name = col_header_list[ind]
        next_name=False
        for name_ind1 in randRange1:
            if col_name!=str(table1.cell_values[name_ind1][0]):
                continue
            for name_ind2 in randRange2:
                if not(col_name==str(table2.cell_values[name_ind2][0])):
                    continue
                # now we have the col header names, do the actual comparison
                if str(table1.cell_values[name_ind1][0])==str(table2.cell_values[name_ind2][0]):
                    # start at 1 to skip the name entry in position 0 of each row
                    randRange3 = generate_for_indices(min(len(table2.cell_values[name_ind2]), len(table1.cell_values[name_ind1])), check_all, num_per_dim,1)
                    for indC in randRange3:
                        val1 = table1.cell_values[name_ind1][indC]
                        val2 = table2.cell_values[name_ind2][indC]*flip_sign_vec[indC]
                        if isinstance(val1, float) and isinstance(val2, float):
                            # relative difference; the max(1, ...) guard keeps tiny values
                            # from inflating the ratio
                            compare_val_ratio = abs(val1-val2)/max(1, abs(val1), abs(val2))
                            if compare_val_ratio > tolerance:
                                print("Table entry difference is {0} at dimension {1} and eigenvector number "
                                      "{2}".format(compare_val_ratio, name_ind1, indC))
                                print("The first vector is {0} and the second vector is {1}".format(table1.cell_values[name_ind1], table2.cell_values[name_ind2]))
                                assert False, "Table entries are not equal within tolerance."
                            worst_error = max(worst_error, compare_val_ratio)
                        else:
                            assert False, "Tables contains non-numerical values. Comparison is for numericals only!"
                    next_name=True
                    break
                else:
                    assert False, "Unknown metric names found in col_header_list."
            if next_name:   # ready to go to the next name in col_header_list
                break
    print("******* Congrats! Test passed. Maximum difference of your comparison is {0}".format(worst_error))
def generate_for_indices(list_size, check_all, num_per_dim, start_val):
    """
    Build the list of indices to examine when comparing tables.

    :param list_size: integer, exclusive upper bound of the index range
    :param check_all: bool, True to return every index from start_val upward
    :param num_per_dim: integer, size of the random sample when check_all is False
    :param start_val: integer, first index of the range
    :return: list of indices, either complete or a random sample without replacement
    """
    indices = list(range(start_val, list_size))
    if not check_all:
        # sample without replacement by shuffling, then keeping a prefix
        random.shuffle(indices)
        indices = indices[0:min(list_size, num_per_dim)]
    return indices
def generate_sign_vec(table1, table2):
sign_vec = [1]*len(table1.cell_values[0])
for indC in range(1, len(table2.cell_values[0])): # may need to look at other elements since some may be zero
for indR in range(0, len(table2.cell_values)):
if (abs(table1.cell_values[indR][indC]) > 0) and (abs(table2.cell_values[indR][indC]) > 0):
sign_vec[indC] = int(np.sign(table1.cell_values[indR][indC]) * np.sign(table2.cell_values[indR][indC]))
# if (np.sign(table1.cell_values[indR][indC])!=np.sign(table2.cell_values[indR][indC])):
# sign_vec[indC] = -1
# else:
# sign_vec[indC] = 1
break # found what we need. Goto next column
return sign_vec
def equal_two_dicts(dict1, dict2, tolerance=1e-6, throwError=True):
    """
    Compare two dicts of numbers that are keyed the same way.

    :param dict1: dict mapping keys to numeric values
    :param dict2: dict mapping the same keys to numeric values
    :param tolerance: maximum absolute difference allowed per key
    :param throwError: bool, True to raise AssertionError on a mismatch
        instead of returning False
    :return: True when all values agree within tolerance, False otherwise
        (False only reachable when throwError is False)
    """
    if len(dict1) != len(dict2):
        # previously a size mismatch fell through silently returning None;
        # report it explicitly instead
        if throwError:
            assert False, "Dict 1 and Dict 2 are of different sizes."
        return False
    for key1 in dict1.keys():
        diff = abs(dict1[key1]-dict2[key1])
        if (diff > tolerance):
            if throwError:
                assert False, "Dict 1 value {0} and Dict 2 value {1} do not agree.".format(dict1[key1], dict2[key1])
            else:
                return False
    return True  # previously fell through returning None on success
def equal_two_arrays(array1, array2, eps=1e-6, tolerance=1e-6, throw_error=True):
    """
    Compare two equal-length sequences element by element.  A pair of values
    is skipped when both fall below eps (too small to matter); every other
    pair must differ by no more than tolerance.

    :param array1: sequence of numbers of interest
    :param array2: sequence of numbers compared against array1
    :param eps: significance level below which a pair is not compared
    :param tolerance: largest allowed difference between compared elements
    :param throw_error: bool, True to raise AssertionError instead of
        returning False on a mismatch
    :return: True when the arrays agree, False otherwise (False only
        reachable when throw_error is False)
    """
    if len(array1) != len(array2):
        if throw_error:
            assert False, "The two arrays are of different size!"
        return False
    for first, second in zip(array1, array2):
        if first < eps and second < eps:
            continue  # both values insignificant, skip the comparison
        if abs(first - second) > tolerance:
            if throw_error:
                assert False, "Array 1 value {0} and array 2 value {1} do not agree.".format(first, second)
            return False
    return True  # elements of the two arrays are close enough
def equal_2d_tables(table1, table2, tolerance=1e-6):
    """
    Compare two 2-D tables cell by cell.  Only float cells are compared; they
    must agree within tolerance.  Cells of any other type are ignored.

    :param table1: nested sequence (rows of values) of interest
    :param table2: nested sequence compared against table1
    :param tolerance: largest allowed difference between float cells
    :return: True if all float cells agree, False otherwise; raises
        AssertionError when the tables (or any row pair) differ in size
    """
    if len(table1) != len(table2):
        assert False, "The two arrays are of different size!"
    for row1, row2 in zip(table1, table2):
        if len(row1) != len(row2):
            assert False, "The two arrays are of different size!"
        for cell1, cell2 in zip(row1, row2):
            # isinstance (rather than `type(x) == float`) also accepts float
            # subclasses such as numpy.float64
            if isinstance(cell1, float):
                if abs(cell1 - cell2) > tolerance:
                    return False
    return True
def compare_two_arrays(array1, array2, eps, tolerance, comparison_string, array1_string, array2_string, error_string,
                       success_string, template_is_better, just_print=False):
    """
    Print the performance comparison of two arrays of values and, when the
    template is expected to be better, score the comparison.  The actual
    numeric comparison is delegated to equal_two_arrays.

    :param array1: numpy array containing some values of interest
    :param array2: numpy array containing some values of interest that we would like to compare it with array1
    :param eps: significance level that we care about in order to perform the comparison
    :param tolerance: threshold for which we allow the two array elements to be different by
    :param comparison_string: string stating what the comparison is about, e.g. "Comparing p-values ...."
    :param array1_string: string stating what is the array1 attribute of interest, e.g. "H2O p-values: "
    :param array2_string: string stating what is the array2 attribute of interest, e.g. "Theoretical p-values: "
    :param error_string: string printed in the AssertionError when the difference exceeds tolerance
    :param success_string: string printed when the arrays agree within tolerance
    :param template_is_better: bool, True: return 1 when the arrays differ beyond tolerance; False: always
        return 0 (the system under test performs better than the template)
    :param just_print: bool, True to only print the two values and skip the comparison
    :return: 0 when the test passes (or is skipped), 1 when it fails
    """
    # display array1, array2 with proper description
    print(comparison_string)
    print(array1_string, array1)
    print(array2_string, array2)
    if just_print:  # just print the two values and do no comparison
        return 0
    if not template_is_better:
        # system under test beats the template; nothing can fail here
        print("Test result is actually better than comparison template!")
        return 0
    try:
        assert equal_two_arrays(array1, array2, eps, tolerance), error_string
        print(success_string)
        sys.stdout.flush()
        return 0
    except AssertionError:
        # only the expected comparison failure is caught; a bare except here
        # previously also hid programming errors such as TypeError
        sys.stdout.flush()
        return 1
def make_Rsandbox_dir(base_dir, test_name, make_dir):
    """
    Remove the directory "Rsandbox_<test_name>" under base_dir (and all of its
    contents) if it exists.  When make_dir is True, a fresh empty directory of
    that name is then created.

    :param base_dir: string, directory under which the sandbox lives
    :param test_name: string, unit-test name the sandbox belongs to
    :param make_dir: bool, True to create a clean sandbox directory
    :return: string, full path of the sandbox directory
    """
    sandbox_path = os.path.join(base_dir, "Rsandbox_" + test_name)
    if os.path.exists(sandbox_path):
        shutil.rmtree(sandbox_path)  # always start from a clean slate
    if make_dir:
        os.makedirs(sandbox_path)
    return sandbox_path
def get_train_glm_params(model, what_param, family_type='gaussian'):
    """
    Grab the various attributes (like coefficients, p-values, and others) off a GLM
    model that has been built.

    :param model: GLM model that we want to extract information from
    :param what_param: string indicating the model attribute of interest: 'p-values', 'weights',
        'best_lambda' or 'confusion_matrix'
    :param family_type: string, optional, represents the various distribution families (gaussian, multinomial,
        binomial) supported by our GLM algo
    :return: attribute value of interest
    """
    coeff_pvalues = model._model_json["output"]["coefficients_table"].cell_values
    if what_param == 'p-values':
        if 'gaussian' in family_type.lower():
            # p-value is stored in the last column of each coefficient row
            return [coeff_pvalues[ind][-1] for ind in range(len(coeff_pvalues))]
        else:
            assert False, "P-values are only available to Gaussian family."
    elif what_param == 'weights':
        if 'gaussian' in family_type.lower():
            return [coeff_pvalues[ind][1] for ind in range(len(coeff_pvalues))]
        elif ('multinomial' in family_type.lower()) or ('binomial' in family_type.lower()):
            # for multinomial, the coefficients are organized as features by number of classes for
            # nonstandardized and then standardized weights. Need to grab the correct matrix as
            # number of classes by n_features matrix
            num_feature = len(coeff_pvalues)
            # integer division: the old (len-1)/2 is a float under Python 3 and breaks np.zeros
            num_class = (len(coeff_pvalues[0])-1)//2
            # builtin float here: the np.float alias was removed from recent numpy releases
            coeffs = np.zeros((num_class, num_feature), dtype=float)
            end_index = int(num_class+1)
            for col_index in range(len(coeff_pvalues)):
                coeffs[:, col_index] = coeff_pvalues[col_index][1:end_index]
            return coeffs
    elif what_param == 'best_lambda':
        lambda_str = model._model_json["output"]["model_summary"].cell_values[0][4].split('=')
        return float(str(lambda_str[-2]).split(',')[0])
    elif what_param == 'confusion_matrix':
        if 'multinomial' in family_type.lower():
            return model._model_json["output"]["training_metrics"]._metric_json["cm"]["table"]
        elif 'binomial' in family_type.lower():
            return model.confusion_matrix().table
    else:
        assert False, "parameter value not found in GLM model"
def less_than(val1, val2):
    """
    Return True when val1 <= val2, comparing only up to the third decimal
    place; differences past that are treated as noise.

    :param val1: first value of interest
    :param val2: second value of interest
    :return: bool: True if round(val1, 3) <= round(val2, 3) and False otherwise
    """
    return round(val1, 3) <= round(val2, 3)
def replace_nan_with_mean(data_with_nans, nans_row_col_indices, col_means):
    """
    Replace every nan in a data set with the mean of the column it sits in.

    :param data_with_nans: data set matrix containing nans
    :param nans_row_col_indices: pair (row_indices, col_indices) locating the nans
    :param col_means: vector containing the column means of the data set
    :return: the same matrix with its nans overwritten by column means
    """
    row_list = nans_row_col_indices[0]
    col_list = nans_row_col_indices[1]
    for ind in range(len(row_list)):
        # the column mean is the imputation value for a missing cell
        data_with_nans[row_list[ind], col_list[ind]] = col_means[col_list[ind]]
    return data_with_nans
def remove_csv_files(dir_path, suffix=".csv", action='remove', new_dir_path=""):
    """
    Gather every file in dir_path whose name ends with suffix and either
    delete it (action 'remove') or move/copy it into new_dir_path (action
    'copy').

    :param dir_path: string representing full path to directory of interest
    :param suffix: string representing suffix of filenames to be found
    :param action: string, optional, denote the action to perform on files, 'remove' or 'copy'
    :param new_dir_path: string, optional, representing full path to new directory
    :return: None
    """
    for filename in os.listdir(dir_path):   # walk every entry in the directory
        if not filename.endswith(suffix):
            continue    # not one of the files we are after
        full_path = os.path.join(dir_path, filename)
        if not os.path.isfile(full_path):   # only act on files that actually exist
            continue
        if 'remove' in action:
            remove_files(full_path)
        elif 'copy' in action:
            move_files(new_dir_path, full_path, filename, action=action)
        else:
            assert False, "action string can only be 'remove' or 'copy."
def extract_comparison_attributes_and_print(model_h2o, h2o_model_test_metrics, end_test_str, want_p_values,
                                            attr1_bool, attr2_bool, att1_template, att2_template, att3_template,
                                            att4_template, compare_att1_str, h2o_att1_str, template_att1_str,
                                            att1_str_fail, att1_str_success, compare_att2_str, h2o_att2_str,
                                            template_att2_str, att2_str_fail, att2_str_success, compare_att3_str,
                                            h2o_att3_str, template_att3_str, att3_str_fail, att3_str_success,
                                            compare_att4_str, h2o_att4_str, template_att4_str, att4_str_fail,
                                            att4_str_success, failed_test_number, ignored_eps, allowed_diff,
                                            noise_var, template_must_be_better, attr3_bool=True, attr4_bool=True):
    """
    This function basically will compare four attributes (weight, p-values, training data MSE, test data MSE) of a test
    with a template model. If the difference of comparison exceeds a certain threshold, the test will be determined as
    failed and vice versa. There are times when we do not care about p-values and/or weight comparisons but mainly
    concerned with MSEs. We can set the input parameters to indicate if this is the case.

    :param model_h2o: H2O model that we want to evaluate
    :param h2o_model_test_metrics: test performance of H2O model under evaluation
    :param end_test_str: string representing end test banner to be printed
    :param want_p_values: bool True if we want to care about p-values and False if we don't
    :param attr1_bool: bool True if we want to compare weight difference between H2O model and template model
    and False otherwise.
    :param attr2_bool: bool True if we want to compare p-value difference between H2O model and template model
    and False otherwise.
    :param att1_template: value of first template attribute, the weight vector
    :param att2_template: value of second template attribute, the p-value vector
    :param att3_template: value of third template attribute, the training data set MSE
    :param att4_template: value of fourth template attribute, the test data set MSE
    :param compare_att1_str: string describing the comparison of first attribute, e.g. "Comparing intercept and
    weights ...."
    :param h2o_att1_str: string describing H2O model first attribute values, e.g. "H2O intercept and weights: "
    :param template_att1_str: string describing template first attribute values, e.g. "Theoretical intercept and
    weights: "
    :param att1_str_fail: string describing message to print out if difference exceeds threshold, e.g.
    "Intercept and weights are not equal!"
    :param att1_str_success: string describing message to print out if difference < threshold, e.g.
    "Intercept and weights are close enough!"
    :param compare_att2_str: string describing the comparison of first attribute, e.g. "Comparing p-values ...."
    :param h2o_att2_str: string describing H2O model first attribute values, e.g. "H2O p-values: "
    :param template_att2_str: string describing template first attribute values, e.g. "Theoretical p-values: "
    :param att2_str_fail: string describing message to print out if difference exceeds threshold, e.g.
    "P-values are not equal!"
    :param att2_str_success: string describing message to print out if difference < threshold, e.g.
    "P-values are close enough!"
    :param compare_att3_str: string describing the comparison of first attribute, e.g. "Comparing training MSEs ...."
    :param h2o_att3_str: string describing H2O model first attribute values, e.g. "H2O training MSE: "
    :param template_att3_str: string describing template first attribute values, e.g. "Theoretical train MSE: "
    :param att3_str_fail: string describing message to print out if difference exceeds threshold, e.g.
    "Training MSEs are not equal!"
    :param att3_str_success: string describing message to print out if difference < threshold, e.g.
    "Training MSEs are close enough!"
    :param compare_att4_str: string describing the comparison of first attribute, e.g. "Comparing test MSEs ...."
    :param h2o_att4_str: string describing H2O model first attribute values, e.g. "H2O test MSE: "
    :param template_att4_str: string describing template first attribute values, e.g. "Theoretical test MSE: "
    :param att4_str_fail: string describing message to print out if difference exceeds threshold, e.g.
    "Test MSEs are not equal!"
    :param att4_str_success: string describing message to print out if difference < threshold, e.g.
    "Test MSEs are close enough!"
    :param failed_test_number: integer denote the number of tests failed
    :param ignored_eps: if value < than this value, no comparison is performed
    :param allowed_diff: threshold if exceeded will fail a test
    :param noise_var: Gaussian noise variance used to generate data set
    :param template_must_be_better: bool: True: template value must be lower, False: don't care
    :param attr3_bool: bool denoting if we should compare attribute 3 values
    :param attr4_bool: bool denoting if we should compare attribute 4 values
    :return: a tuple containing test h2o model training and test performance metrics that include: weight, pValues,
    mse_train, r2_train, mse_test, r2_test
    """
    # grab weight from h2o model
    test1_weight = get_train_glm_params(model_h2o, 'weights')
    # grab p-values from h2o model
    test1_p_values = []
    if want_p_values:
        test1_p_values = get_train_glm_params(model_h2o, 'p-values')
    # grab other performance metrics
    test1_mse_train = model_h2o.mse()
    test1_r2_train = model_h2o.r2()
    test1_mse_test = h2o_model_test_metrics.mse()
    test1_r2_test = h2o_model_test_metrics.r2()
    # compare performances of template and h2o model weights
    # NOTE: the weight tolerance is intentionally looser (allowed_diff*100) than the other comparisons
    failed_test_number += compare_two_arrays(test1_weight, att1_template, ignored_eps, allowed_diff*100, compare_att1_str,
                                             h2o_att1_str, template_att1_str, att1_str_fail, att1_str_success,
                                             attr1_bool)
    # p-values
    if want_p_values:
        if np.isnan(np.asarray(test1_p_values)).any():  # p-values contain nan
            failed_test_number += 1
        failed_test_number += compare_two_arrays(test1_p_values, att2_template, ignored_eps, allowed_diff,
                                                 compare_att2_str, h2o_att2_str, template_att2_str, att2_str_fail,
                                                 att2_str_success, attr2_bool)
    # Training MSE
    need_to_compare = less_than(att3_template, test1_mse_train)
    # in some cases, template value should always be better. Training data MSE should always
    # be better without regularization than with regularization
    if (not need_to_compare) and template_must_be_better:
        failed_test_number += 1
    failed_test_number += compare_two_arrays([test1_mse_train], [att3_template], ignored_eps, noise_var,
                                             compare_att3_str, h2o_att3_str,
                                             template_att3_str, att3_str_fail, att3_str_success, attr3_bool)
    # Test MSE
    need_to_compare = less_than(att4_template, test1_mse_test)
    failed_test_number += compare_two_arrays([test1_mse_test], [att4_template], ignored_eps, noise_var,
                                             compare_att4_str, h2o_att4_str, template_att4_str, att4_str_fail,
                                             att4_str_success, need_to_compare, attr4_bool)
    # print end test banner
    print(end_test_str)
    print("*******************************************************************************************")
    sys.stdout.flush()
    return test1_weight, test1_p_values, test1_mse_train, test1_r2_train, test1_mse_test,\
        test1_r2_test, failed_test_number
def extract_comparison_attributes_and_print_multinomial(model_h2o, h2o_model_test_metrics, family_type, end_test_str,
                                                        compare_att_str=["", "", "", "", "", "", ""],
                                                        h2o_att_str=["", "", "", "", "", "", ""],
                                                        template_att_str=["", "", "", "", "", "", ""],
                                                        att_str_fail=["", "", "", "", "", "", ""],
                                                        att_str_success=["", "", "", "", "", "", ""],
                                                        test_model=None, test_model_metric=None, template_params=None,
                                                        can_be_better_than_template=[
                                                            False, False, False, False, False, False, False],
                                                        just_print=[True, True, True, True, True, True, True],
                                                        ignored_eps=1e-15, allowed_diff=1e-5, failed_test_number=0):
    """
    This function basically will compare and print out seven attributes (model weights plus six performance
    metrics) of a test with a template model. If the difference of comparison exceeds a certain threshold,
    the test will be determined as failed and vice versa. There are times when we do not care about comparisons
    but mainly concerned with logloss/prediction accuracy in determining if a test shall fail. We can set the
    input parameters to indicate if this is the case.

    The seven attributes are compared in this fixed order (this is also the index into every array argument):
    0 = weights, 1 = training logloss, 2 = test logloss, 3 = training confusion matrix,
    4 = test confusion matrix, 5 = training accuracy, 6 = test accuracy.

    :param model_h2o: H2O model that we want to evaluate
    :param h2o_model_test_metrics: test performance of H2O model under evaluation
    :param family_type: string represents the various distribution families (gaussian, multinomial, binomial)
    supported by our GLM algo
    :param end_test_str: string to be printed at the end of a test
    :param compare_att_str: array of 7 strings describing what we are trying to compare
    :param h2o_att_str: array of 7 strings describing each H2O attribute of interest
    :param template_att_str: array of 7 strings describing template attribute of interest
    :param att_str_fail: array of 7 strings to be printed if the comparison failed
    :param att_str_success: array of 7 strings to be printed if comparison succeeded
    :param test_model: template model whose attributes we want to compare our H2O model with
    :param test_model_metric: performance on test data set of template model
    :param template_params: array containing template attribute values that we want to compare our H2O model with
    :param can_be_better_than_template: array of 7 bool: True: template value must be lower, False: don't care
    :param just_print: array of 7 bool for each attribute if True, no comparison is performed, just print the
    attributes and if False, will compare the attributes and print the attributes as well
    :param ignored_eps: if value < than this value, no comparison is performed
    :param allowed_diff: threshold if exceeded will fail a test
    :param failed_test_number: integer denote the number of tests failed so far
    :return: accumulated number of tests that have failed so far
    """
    # NOTE: the default lists for can_be_better_than_template and just_print carry SEVEN entries because
    # compare_index walks 0..6 below; six-element defaults would raise IndexError at the test-accuracy step.
    # grab performance metrics from h2o model
    (h2o_weight, h2o_logloss_train, h2o_confusion_matrix_train, h2o_accuracy_train, h2o_logloss_test,
     h2o_confusion_matrix_test, h2o_accuracy_test) = grab_model_params_metrics(model_h2o, h2o_model_test_metrics,
                                                                               family_type)
    # grab performance metrics from template model
    if test_model and test_model_metric:
        (template_weight, template_logloss_train, template_confusion_matrix_train, template_accuracy_train,
         template_logloss_test, template_confusion_matrix_test, template_accuracy_test) = \
            grab_model_params_metrics(test_model, test_model_metric, family_type)
    elif template_params:
        # grab template comparison values from somewhere else
        (template_weight, template_logloss_train, template_confusion_matrix_train, template_accuracy_train,
         template_logloss_test, template_confusion_matrix_test, template_accuracy_test) = template_params
    else:
        assert False, "No valid template parameters are given for comparison."
    # print and/or compare the weights between template and H2O
    compare_index = 0
    failed_test_number += compare_two_arrays(h2o_weight, template_weight, ignored_eps, allowed_diff,
                                             compare_att_str[compare_index], h2o_att_str[compare_index],
                                             template_att_str[compare_index], att_str_fail[compare_index],
                                             att_str_success[compare_index], True, just_print[compare_index])
    compare_index += 1
    # this is logloss from training data set,
    if not(just_print[compare_index]) and not(can_be_better_than_template[compare_index]):
        if (h2o_logloss_train < template_logloss_train) and \
                (abs(h2o_logloss_train-template_logloss_train) > 1e-5):
            # H2O performed better than template which is not allowed
            failed_test_number += 1     # increment failed_test_number and just print the results
            compare_two_arrays([h2o_logloss_train], [template_logloss_train], ignored_eps, allowed_diff,
                               compare_att_str[compare_index], h2o_att_str[compare_index],
                               template_att_str[compare_index], att_str_fail[compare_index],
                               att_str_success[compare_index], True, True)
        else:
            failed_test_number += compare_two_arrays([h2o_logloss_train], [template_logloss_train], ignored_eps,
                                                     allowed_diff, compare_att_str[compare_index],
                                                     h2o_att_str[compare_index], template_att_str[compare_index],
                                                     att_str_fail[compare_index], att_str_success[compare_index], True,
                                                     False)
    else:
        template_better = is_template_better(just_print[compare_index], can_be_better_than_template[compare_index],
                                             h2o_logloss_train, template_logloss_train, False)
        # print and compare the logloss between template and H2O for training data
        failed_test_number += compare_two_arrays([h2o_logloss_train], [template_logloss_train], ignored_eps,
                                                 allowed_diff, compare_att_str[compare_index],
                                                 h2o_att_str[compare_index], template_att_str[compare_index],
                                                 att_str_fail[compare_index], att_str_success[compare_index],
                                                 template_better, just_print[compare_index])
    compare_index += 1
    template_better = is_template_better(just_print[compare_index], can_be_better_than_template[compare_index],
                                         h2o_logloss_test, template_logloss_test, False)
    # print and compare the logloss between template and H2O for test data
    failed_test_number += compare_two_arrays([h2o_logloss_test], [template_logloss_test], ignored_eps, allowed_diff,
                                             compare_att_str[compare_index], h2o_att_str[compare_index],
                                             template_att_str[compare_index], att_str_fail[compare_index],
                                             att_str_success[compare_index], template_better, just_print[compare_index])
    compare_index += 1
    # print the confusion matrix from training data
    failed_test_number += compare_two_arrays(h2o_confusion_matrix_train, template_confusion_matrix_train, ignored_eps,
                                             allowed_diff, compare_att_str[compare_index], h2o_att_str[compare_index],
                                             template_att_str[compare_index], att_str_fail[compare_index],
                                             att_str_success[compare_index], True, just_print[compare_index])
    compare_index += 1
    # print the confusion matrix from test data
    failed_test_number += compare_two_arrays(h2o_confusion_matrix_test, template_confusion_matrix_test, ignored_eps,
                                             allowed_diff, compare_att_str[compare_index], h2o_att_str[compare_index],
                                             template_att_str[compare_index], att_str_fail[compare_index],
                                             att_str_success[compare_index], True, just_print[compare_index])
    compare_index += 1
    template_better = is_template_better(just_print[compare_index], can_be_better_than_template[compare_index],
                                         h2o_accuracy_train, template_accuracy_train, True)
    # print accuracy from training dataset
    failed_test_number += compare_two_arrays([h2o_accuracy_train], [template_accuracy_train], ignored_eps, allowed_diff,
                                             compare_att_str[compare_index], h2o_att_str[compare_index],
                                             template_att_str[compare_index], att_str_fail[compare_index],
                                             att_str_success[compare_index], template_better, just_print[compare_index])
    compare_index += 1
    # print accuracy from test dataset
    template_better = is_template_better(just_print[compare_index], can_be_better_than_template[compare_index],
                                         h2o_accuracy_test, template_accuracy_test, True)
    failed_test_number += compare_two_arrays([h2o_accuracy_test], [template_accuracy_test], ignored_eps, allowed_diff,
                                             compare_att_str[compare_index], h2o_att_str[compare_index],
                                             template_att_str[compare_index], att_str_fail[compare_index],
                                             att_str_success[compare_index], template_better, just_print[compare_index])
    # print end test banner
    print(end_test_str)
    print("*******************************************************************************************")
    sys.stdout.flush()
    return failed_test_number
def is_template_better(just_print, can_be_better_than_template, h2o_att, template_att, bigger_is_better):
    """
    Decide whether the template model's attribute beats the H2O attribute under test.

    :param just_print: bool, when True the caller only wants the values printed, so the answer is irrelevant
        and True is returned unconditionally
    :param can_be_better_than_template: bool, kept for interface compatibility (not consulted here)
    :param h2o_att: numeric attribute measured on the H2O model under test
    :param template_att: numeric attribute measured on the template model
    :param bigger_is_better: bool, True when a larger metric value means better performance
    :return: bool, True when the template attribute is at least as good as the H2O one
    """
    if just_print:
        # caller only wants the values displayed; any answer works here
        return True
    if bigger_is_better:
        # template wins (or ties) unless H2O's value is strictly larger
        return h2o_att <= template_att
    # smaller is better: template wins (or ties) unless H2O's value is strictly smaller
    return h2o_att >= template_att
def grab_model_params_metrics(model_h2o, h2o_model_test_metrics, family_type):
    """
    Extract the weights and the train/test performance metrics from a H2O GLM model and its
    associated test-set metrics object.

    :param model_h2o: GLM H2O model
    :param h2o_model_test_metrics: performance on test data set from H2O GLM model
    :param family_type: string representing 'gaussian', 'binomial' or 'multinomial'
    :return: tuple of (weights, train logloss, train confusion matrix, train accuracy,
        test logloss, test confusion matrix, test accuracy)
    """
    weights = get_train_glm_params(model_h2o, 'weights', family_type=family_type)
    logloss_train = model_h2o.logloss()
    cm_train = get_train_glm_params(model_h2o, 'confusion_matrix', family_type=family_type)
    # last row/column index of the confusion matrix table (the totals/error entries live there)
    diag = len(cm_train.cell_values) - 1
    logloss_test = h2o_model_test_metrics.logloss()
    family = family_type.lower()
    # NOTE: 'multinomial' must be tested first since it also contains the substring 'binomial'
    if 'multinomial' in family:
        cm_test = h2o_model_test_metrics.confusion_matrix()
        # bottom-right cell holds the overall error rate; accuracy is its complement
        acc_train = 1 - cm_train.cell_values[diag][diag]
        acc_test = 1 - cm_test.cell_values[diag][diag]
    elif 'binomial' in family:
        cm_test = h2o_model_test_metrics.confusion_matrix().table
        err_col = diag + 1
        acc_train = 1 - float(cm_train.cell_values[diag][err_col])
        acc_test = 1 - float(cm_test.cell_values[diag][err_col])
    else:
        assert False, "Only 'multinomial' and 'binomial' distribution families are supported for " \
                      "grab_model_params_metrics function!"
    return weights, logloss_train, cm_train, acc_train, logloss_test, cm_test, acc_test
def prepare_data_sklearn_multinomial(training_data_xy):
    """
    Sklearn model requires that the input matrix should contain a column of ones in order for
    it to generate the intercept term. In addition, it wants the response vector to be in a
    certain format as well.
    :param training_data_xy: 2-D numpy array containing the predictors in the leading columns and the
        response in the last column
    :return: tuple (response_y, x_mat): the response vector in the format that Sklearn wants, and the
        predictor columns with a column of ones prepended as the first column (intercept term)
    """
    (num_row, num_col) = training_data_xy.shape
    # change response to be enum and not real
    y_ind = num_col-1
    # truncate the response COLUMN to integer class labels; the previous code indexed
    # training_data_xy[y_ind], which addresses ROW y_ind instead of the response column
    training_data_xy[:, y_ind] = training_data_xy[:, y_ind].astype(int)
    # prepare response column for sklearn logistic regression
    response_y = training_data_xy[:, y_ind]
    response_y = np.ravel(response_y)
    training_data = training_data_xy[:, range(0, y_ind)]
    # added column of ones into data matrix X_MAT for the intercept term
    temp_ones = np.asmatrix(np.ones(num_row)).transpose()
    x_mat = np.concatenate((temp_ones, training_data), axis=1)
    return response_y, x_mat
def get_gridables(params_in_json):
    """
    This function is written to walk through all parameters of a model and grab the parameters, its type and
    its default values as three lists of all the gridable parameters.
    :param params_in_json: a list of parameters associated with a H2O model.  Each list is a dict containing fields
    of interest like name, type, gridable, default values, ....
    :return: three lists: gridable_params, gridable_types and gridable_defaults containing the names of the parameter,
    its associated type like int, float, unicode, bool and default parameter values
    """
    # grab all gridable parameters and its type
    gridable_parameters = []
    gridable_types = []
    gridable_defaults = []
    for each_param in params_in_json:
        if each_param['gridable']:
            gridable_parameters.append(str(each_param["name"]))
            gridable_types.append(each_param["type"])
            default_value = each_param["default_value"]
            # hyper-parameters cannot be unicode, force plain str.  The previous check compared
            # type(x) against the string literal 'unicode', which is always False.
            if isinstance(default_value, str):
                gridable_defaults.append(str(default_value))
            else:
                gridable_defaults.append(default_value)
    return gridable_parameters, gridable_types, gridable_defaults
def add_fold_weights_offset_columns(h2o_frame, nfold_max_weight_offset, column_names, column_type='fold_assignment'):
    """
    Add fold_columns to H2O training frame specified in h2o_frame according to nfold.  The new added
    columns should use the names in column_names.  Returns a h2o_frame with newly added fold_columns.
    Copied from Eric's code.

    :param h2o_frame: H2O frame containing training data
    :param nfold_max_weight_offset: integer, number of fold in the cross-validation or maximum weight scale or offset
    :param column_names: list of strings denoting the column names for the new fold columns
    :param column_type: optional string denoting whether we are trying to generate fold_assignment or
    weights_column or offset_column
    :return: H2O frame with added fold column assignments
    """
    number_row = h2o_frame.nrow
    # copied this part from Eric's code
    for index in range(len(column_names)):
        if 'fold_assignment' in column_type:
            # np.random.randint's upper bound is exclusive, so this draws in [0, nfold_max_weight_offset - 1],
            # identical to the deprecated np.random.random_integers(0, nfold_max_weight_offset - 1, ...)
            temp_a = np.random.randint(0, nfold_max_weight_offset, [number_row, 1])
        elif 'weights_column' in column_type:
            temp_a = np.random.uniform(0, nfold_max_weight_offset, [number_row, 1])
        elif 'offset_column' in column_type:
            # one random scale applied uniformly across a column of ones
            temp_a = random.uniform(0, nfold_max_weight_offset)*np.asmatrix(np.ones(number_row)).transpose()
        else:
            # message now lists every supported type ('offset_column' was previously omitted)
            assert False, "column_type must be either 'fold_assignment' or 'weights_column' or 'offset_column'!"
        fold_assignments = h2o.H2OFrame(temp_a)
        fold_assignments.set_names([column_names[index]])
        h2o_frame = h2o_frame.cbind(fold_assignments)
    return h2o_frame
def gen_grid_search(model_params, hyper_params, exclude_parameters, gridable_parameters, gridable_types,
                    gridable_defaults, max_int_number, max_int_val, min_int_val, max_real_number, max_real_val,
                    min_real_val, quantize_level='1.00000000'):
    """
    Randomly generate hyper-parameter value lists for a gridsearch.  Parameters already present in
    hyper_params keep their user-supplied lists.  Only parameters that the model actually accepts
    (and that are not excluded) are considered truly gridable.

    :param model_params: list of string containing names of argument to the model
    :param hyper_params: dict mapping gridable parameter names to their candidate value lists (mutated in place)
    :param exclude_parameters: list containing parameter names not to be added to hyper_params
    :param gridable_parameters: list of gridable parameter names
    :param gridable_types: list of gridable parameter types
    :param gridable_defaults: list of gridable parameter default values
    :param max_int_number: integer, size of integer gridable parameter list
    :param max_int_val: integer, maximum integer value for integer gridable parameter
    :param min_int_val: integer, minimum integer value for integer gridable parameter
    :param max_real_number: integer, size of real gridable parameter list
    :param max_real_val: float, maximum real value for real gridable parameter
    :param min_real_val: float, minimum real value for real gridable parameter
    :param quantize_level: string representing the quantization level of floating point values generated randomly
    :return: tuple (hyper_params, true_gridable_parameters, true_gridable_types, true_gridable_defaults)
    """
    true_gridable_parameters = []
    true_gridable_types = []
    true_gridable_defaults = []
    for para_name, para_type, para_default in zip(gridable_parameters, gridable_types, gridable_defaults):
        # keep only parameters the model accepts and that are not explicitly excluded
        if (para_name not in model_params) or (para_name in exclude_parameters):
            continue
        true_gridable_parameters.append(para_name)
        true_gridable_types.append(para_type)
        true_gridable_defaults.append(para_default)
        if para_name in hyper_params:
            continue  # user already supplied a value list for this parameter
        # gridable parameter not seen before: randomly generate values for it
        if ('int' in para_type) or ('long' in para_type):
            # set dedupes the random draws so integer values are unique
            hyper_params[para_name] = list({random.randint(min_int_val, max_int_val)
                                            for _ in range(max_int_number)})
        elif ('double' in para_type) or ('float' in para_type):
            hyper_params[para_name] = fix_float_precision(
                list(np.random.uniform(min_real_val, max_real_val, max_real_number)),
                quantize_level=quantize_level)
    return hyper_params, true_gridable_parameters, true_gridable_types, true_gridable_defaults
def fix_float_precision(float_list, quantize_level='1.00000000'):
    """
    Quantize each floating point number in float_list to a fixed decimal precision and drop duplicates.

    :param float_list: tuple/list of floating point numbers
    :param quantize_level: string, optional, the Decimal exponent pattern controlling the fixed precision
    :return: list of unique floats rounded to the precision specified in quantize_level
    """
    step = Decimal(quantize_level)
    # quantize through Decimal, then dedupe via a set comprehension
    return list({float(Decimal(value).quantize(step)) for value in float_list})
def extract_used_params_xval(a_grid_model, model_param_names, params_dict, algo="GBM"):
    """
    Like extract_used_params, but for cross-validated grid models: the parameters (including
    max_runtime_secs) are read off ONE of the cross-validation fold models instead of the base model.

    :param a_grid_model: one model generated by gridsearch
    :param model_param_names: hyper-parameter names that are specified for the gridsearch
    :param params_dict: dict containing name/value pairs specified to an algo
    :param algo: string, optional, denoting the algo we are looking at
    :return: dict of parameter name/value pairs usable to rebuild the model by hand
    """
    if not a_grid_model._is_xvalidated:
        # no cross-validation: read the parameters straight off the base model
        return extract_used_params(model_param_names, a_grid_model.params, params_dict, algo)
    xv_keys = a_grid_model._xval_keys
    if not xv_keys:
        # no fold models available; nothing to extract (mirrors original empty-dict behavior)
        return dict()
    # parameters are the same across folds, so the first fold model suffices
    fold_model = h2o.get_model(xv_keys[0])
    return extract_used_params(model_param_names, fold_model.params, params_dict, algo)
def extract_used_params(model_param_names, grid_model_params, params_dict, algo="GLM"):
    """
    Build a dict of the parameter name/value pairs that gridsearch actually used to build a model,
    so the same model can be rebuilt by hand.

    :param model_param_names: list contains parameter names that we are interested in extracting
    :param grid_model_params: dict contains key as names of parameter and values as list of two values: default and
    actual.
    :param params_dict: dict containing extra parameters to add to params_used like family, e.g. 'gaussian',
    'binomial', ...
    :param algo: string, optional, algo name; for "GLM" the 'lambda' key is renamed to 'Lambda'
    :return: dict of parameter name -> actual value used by gridsearch
    """
    params_used = dict()
    for raw_name in model_param_names:
        key = str(raw_name)
        if key in grid_model_params.keys():
            params_used[key] = grid_model_params[raw_name]['actual']
    if params_dict:
        # fold in extra settings such as the distribution family
        params_used.update(params_dict)
    # only for GLM, rename lambda to Lambda (the Python client argument name)
    if algo == "GLM" and 'lambda' in params_used:
        params_used['Lambda'] = params_used.pop('lambda')
    return params_used
def insert_error_grid_search(hyper_params, gridable_parameters, gridable_types, error_number):
    """
    Return a corrupted deep copy of hyper_params for negative gridsearch testing.  The kind of
    corruption depends on error_number:
      0: duplicate a randomly chosen hyper-parameter name (name concatenated with itself);
      1: empty the value list of a randomly chosen hyper-parameter;
      2: add a randomly generated hyper-parameter name with a copied value list;
      other: append an illegally typed value to a randomly chosen hyper-parameter list.

    :param hyper_params: dict containing all legal hyper-parameters for our grid search
    :param gridable_parameters: name of griddable parameters (some may not be griddable)
    :param gridable_types: type of griddable parameters
    :param error_number: integer selecting the corruption described above
    :return: new dict with errors in either parameter names or parameter values
    """
    corrupted = copy.deepcopy(hyper_params)
    chosen_name = list(hyper_params)[random.randint(0, len(hyper_params)-1)]
    chosen_type = gridable_types[gridable_parameters.index(chosen_name)]
    if error_number == 0:
        # replace the parameter with one whose name is the original name doubled
        corrupted[chosen_name + chosen_name] = corrupted.pop(chosen_name)
    elif error_number == 1:
        corrupted[chosen_name] = []
    elif error_number == 2:
        corrupted[generate_random_words(random.randint(20, 100))] = corrupted[chosen_name]
    else:
        corrupted = insert_bad_value(corrupted, chosen_name, chosen_type)
    return corrupted
def insert_bad_value(error_hyper_params, param_name, param_type):
    """
    Append a deliberately wrongly-typed value to one hyper-parameter's value list, for negative testing.

    :param error_hyper_params: dict containing all hyper-parameters for a grid search (mutated in place)
    :param param_name: string denoting the hyper-parameter we want to insert bad element to
    :param param_type: string denoting hyper-parameter type
    :return: the same dict with the bad value appended
    """
    if 'int' in param_type:
        bad_value = random.uniform(-10, 10)        # a real number where an integer belongs
    elif 'enum' in param_type:
        bad_value = random.uniform(-10, 10)        # a float where an enum belongs
    elif 'double' in param_type:
        bad_value = random.uniform(0, 1) > 0.5     # a bool where a float belongs
    else:
        # a random string for all other cases
        bad_value = generate_random_words(random.randint(20, 100))
    error_hyper_params[param_name].append(bad_value)
    return error_hyper_params
def generate_random_words(word_length):
    """
    Generate a random word of the requested length drawn from letters, digits and punctuation.

    :param word_length: integer denoting length of the word; must be > 0
    :return: string representing the random word
    """
    assert word_length > 0, "word_length must be an integer greater than 0."
    alphabet = string.ascii_letters + string.digits + string.punctuation
    return ''.join(random.choice(alphabet) for _ in range(int(word_length)))
def generate_redundant_parameters(hyper_params, gridable_parameters, gridable_defaults, error_number):
    """
    Randomly pick a set of hyper-parameters and build a model-parameter dict out of them so that the
    same parameters are specified both on the model and in the grid search (redundantly).

    :param hyper_params: dict containing all griddable parameters as hyper_param to grid search
    :param gridable_parameters: list of gridable parameters (not truly)
    :param gridable_defaults: list of default values for gridable parameters
    :param error_number: int selecting how the model parameter relates to the hyper-parameter list:
        0: model parameter takes a value from the (default-stripped) hyper-parameter list — no error expected;
        1: model parameter takes the default value — no error expected;
        other: model parameter takes a non-default value from the hyper-parameter list — error expected.
    :return: tuple (params_dict, error_hyper_params): the duplicated model-parameter dict and the
        hyper-parameter dict with default values stripped out
    """
    error_hyper_params = copy.deepcopy(hyper_params)
    params_dict = {}
    num_params = random.randint(1, len(error_hyper_params))
    params_list = list(error_hyper_params)
    # strip each parameter's default value out of its candidate list
    for name in params_list:
        default_value = gridable_defaults[gridable_parameters.index(name)]
        if default_value in error_hyper_params[name]:
            error_hyper_params[name].remove(default_value)
    for name in params_list[:num_params]:
        values = error_hyper_params[name]
        if error_number == 1:
            # model parameter is set to the default value
            params_dict[name] = gridable_defaults[gridable_parameters.index(name)]
        else:
            # model parameter takes a random value from the default-stripped list
            params_dict[name] = values[random.randint(0, len(values)-1)]
    # final check to make sure lambda is Lambda
    if 'lambda' in params_dict:
        params_dict["Lambda"] = params_dict.pop('lambda')
    return params_dict, error_hyper_params
def count_models(hyper_params):
    """
    Return the maximum number of models that can be built from all combinations of the
    given hyper-parameter value lists.

    :param hyper_params: dict containing parameter name and a list of values to iterate over
    :return: int, product of all value-list lengths (1 for an empty dict)
    """
    total = 1
    for values in hyper_params.values():
        total *= len(values)
    return total
def error_diff_2_models(grid_table1, grid_table2, metric_name):
    """
    Compute the mean absolute difference of the last-column metric values between two
    gridsearch result tables, model by model.

    :param grid_table1: first H2OTwoDimTable generated by gridsearch
    :param grid_table2: second H2OTwoDimTable generated by gridsearch
    :param metric_name: string, name of the metric of interest (kept for API symmetry; the metric
        is read from the last column of each row)
    :return: real number which is the mean absolute metric difference between the two models
    """
    rows1 = grid_table1.cell_values
    rows2 = grid_table2.cell_values
    num_model = len(rows1)
    assert num_model > 0, "error_diff_2_models: your table contains zero models."
    total_diff = sum(abs(r1[-1] - r2[-1]) for r1, r2 in zip(rows1, rows2))
    return total_diff / num_model
def find_grid_runtime(model_list):
    """
    Sum the build time of every model in model_list (plus the build time of each
    cross-validation fold model, when cross-validation was used) and return it in seconds.

    :param model_list: list of model built by gridsearch, cartesian or randomized with cross-validation
    enabled.
    :return: total build time of all models in seconds
    """
    total_time_ms = 0
    for model in model_list:
        total_time_ms += model._model_json["output"]["run_time"]  # run_time is reported in ms
        # when cross-validation was used, fold-model build times count too
        if model._is_xvalidated:
            for xv_id in model._xval_keys:
                total_time_ms += h2o.get_model(xv_id)._model_json["output"]["run_time"]
    return total_time_ms/1000.0  # convert ms to seconds
def evaluate_metrics_stopping(model_list, metric_name, bigger_is_better, search_criteria, possible_model_number):
    """
    Manually replay the early-stopping rule over the metric values of models in build order and
    check that randomized grid search stopped at the right point.

    :param model_list: list of models built sequentially that contains metric of interest among other fields
    :param metric_name: string representing name of metric that we want to based our stopping condition on
    :param bigger_is_better: bool indicating if the metric is optimized by getting bigger if True and vice versa
    :param search_criteria: dict structure containing the search criteria for randomized gridsearch
    :param possible_model_number: integer, the absolute possible number of models from the hyper-parameter space
    :return: bool indicating if the early stopping condition is justified
    """
    tolerance = search_criteria["stopping_tolerance"]
    stop_round = search_criteria["stopping_rounds"]
    # minimum number of metrics needed before early stopping can be evaluated
    min_list_len = 2*stop_round
    metrics_seen = []  # accumulates metrics in build order; evaluate_early_stopping may reverse it in place
    for metric_value in sort_model_by_time(model_list, metric_name):
        metrics_seen.append(metric_value)
        if len(metrics_seen) > min_list_len and \
                evaluate_early_stopping(metrics_seen, stop_round, tolerance, bigger_is_better):
            # stopping triggered here: correct only if gridsearch also stopped by now
            # (fewer metrics than models means gridsearch should have stopped earlier but didn't)
            return len(metrics_seen) >= len(model_list)
    # stopping never triggered: only acceptable if gridsearch exhausted the whole hyper-parameter space
    return len(metrics_seen) == possible_model_number
def sort_model_by_time(model_list, metric_name):
    """
    Arrange the chosen metric of every model in build order, oldest model first.

    The slot of each model in the returned list is taken from the numeric suffix
    of its model id, which encodes when the model was built.

    :param model_list: list of models built sequentially that contains the metric of interest among other fields
    :param metric_name: string name of the metric to extract from each model's cross-validation metrics
    :return: list of metric values ordered by build time
    """
    ordered_metrics = [None] * len(model_list)
    for model in model_list:
        build_position = int(model._id.split('_')[-1])  # id suffix encodes build order
        cv_metrics = model._model_json["output"]["cross_validation_metrics"]
        ordered_metrics[build_position] = cv_metrics._metric_json[metric_name]
    return ordered_metrics
def evaluate_early_stopping(metric_list, stop_round, tolerance, bigger_is_better):
    """
    Mimic the early-stopping decision implemented in ScoreKeeper.java.  Please see the Java
    file comment for the full explanation of how the early stopping works.

    The most recent stop_round metrics are averaged and compared (as a ratio) against the
    average of the stop_round metrics before them; stopping is indicated when the ratio no
    longer beats the tolerance in the optimization direction.

    NOTE: when bigger_is_better is True, metric_list is reversed IN PLACE as a side effect
    (matching the original implementation).

    :param metric_list: list containing the optimization metric of each gridsearch model
    :param stop_round: integer, determines the averaging window length
    :param tolerance: real, minimum relative improvement required to keep going
    :param bigger_is_better: bool, True if the metric improves by getting bigger
    :return: bool indicating if early stopping should trigger
    """
    if bigger_is_better:
        metric_list.reverse()
    # skip a leading non-float entry (e.g. an iteration label) when one is present
    offset = 0 if isinstance(metric_list[0], float) else 1
    recent_avg = sum(metric_list[offset:stop_round]) / float(stop_round)
    earlier_avg = sum(metric_list[stop_round:2 * stop_round]) / float(stop_round)
    if np.sign(recent_avg) != np.sign(earlier_avg):
        return False  # a ratio across a sign change is meaningless
    ratio = recent_avg / earlier_avg
    if math.isnan(ratio):
        return False
    if bigger_is_better:
        return ratio <= 1 + tolerance  # stop when not enough relative improvement
    return ratio >= 1 - tolerance
def check_and_count_models(hyper_params, params_zero_one, params_more_than_zero, params_more_than_one,
                           params_zero_positive, max_grid_model):
    """
    Derive from hyper_params a (possibly smaller) hyper-parameter space whose total grid-model
    count stays within max_grid_model, and report that count.  Illegal parameter values are
    discounted before counting.

    :param hyper_params: dict containing model parameter names and list of values to set it to
    :param params_zero_one: list of parameter names whose values must be between 0 and 1
    :param params_more_than_zero: list of parameter names whose values must exceed zero
    :param params_more_than_one: list of parameter names whose values must exceed one
    :param params_zero_positive: list of parameter names whose values must be >= zero
    :param max_grid_model: maximum number of grid models allowed from the returned space
    :return: (total_model, final_hyper_params) where total_model is the number of grid models
             buildable from all legal settings, and final_hyper_params is the reduced space
    """
    def legal_value_count(name):
        # count only the values that are legal for this particular parameter
        values = hyper_params[name]
        if name == "col_sample_rate_change_per_level":  # this param should be > 0 and <= 2
            return sum(1 for v in values if 0 < v <= 2)
        if name in params_zero_one:
            return sum(1 for v in values if 0 <= v <= 1)
        if name in params_more_than_zero:
            return sum(1 for v in values if v > 0)
        if name in params_more_than_one:
            return sum(1 for v in values if v > 1)
        if name in params_zero_positive:
            return sum(1 for v in values if v >= 0)
        return len(values)

    shuffled_names = list(hyper_params)
    random.shuffle(shuffled_names)  # visit hyper-parameter names in random order
    total_model = 1
    final_hyper_params = dict()
    for name in shuffled_names:
        count = legal_value_count(name)
        if count >= 0 and total_model * count <= max_grid_model:
            total_model *= count
            final_hyper_params[name] = hyper_params[name]
        elif total_model * count > max_grid_model:
            break  # adding this parameter would exceed the model budget
    return total_model, final_hyper_params
def write_hyper_parameters_json(dir1, dir2, json_filename, hyper_parameters):
    """
    Write a json file of the hyper_parameters into both dir1 and dir2 for debugging purposes.

    :param dir1: string, first directory (e.g. test directory) receiving the json file
    :param dir2: string, second directory (e.g. sandbox) receiving the json file
    :param json_filename: string containing the json file name
    :param hyper_parameters: dict containing the hyper-parameters used
    """
    for target_dir in (dir1, dir2):
        # identical copy dropped in each directory
        with open(os.path.join(target_dir, json_filename), 'w') as json_out:
            json.dump(hyper_parameters, json_out)
def compare_frames(frame1, frame2, numElements, tol_time=0, tol_numeric=0, strict=False, compare_NA=True,
                   custom_comparators=None):
    """
    This function will compare two H2O frames to make sure their dimension, and values in all cells are the same.
    It will not compare the column names though.

    :param frame1: H2O frame to be compared
    :param frame2: H2O frame to be compared
    :param numElements: integer to denote number of rows to compare.  Done to reduce compare time.
        Set to 0 or negative number if you want to compare all elements.
    :param tol_time: optional parameter to limit time value difference.
    :param tol_numeric: optional parameter to limit numeric value difference.
    :param strict: optional parameter to enforce strict comparison or not.  If True, column type must
        match in order to pass the test.
    :param compare_NA: optional parameter to compare NA or not.  For csv file generated from orc file, the
        NAs are represented as some other symbol but our CSV will not be able to parse it correctly as NA.
        In this case, do not compare the number of NAs.
    :param custom_comparators: optional dict mapping a column name to a comparator callable invoked as
        comparator(frame1, frame2, col_ind, rows1, numElements); it overrides the default comparison
        for that column.
    :return: boolean: True, the two frames are equal and False otherwise.
    """
    # check frame dimensions
    rows1, cols1 = frame1.dim
    rows2, cols2 = frame2.dim
    assert rows1 == rows2 and cols1 == cols2, "failed dim check! frame 1 rows:{0} frame 2 rows:{1} frame 1 cols:{2} " \
                                              "frame2 cols:{3}".format(rows1, rows2, cols1, cols2)
    # total NA count across each whole frame, collapsed into a single-cell frame
    na_frame1 = frame1.isna().sum().sum(axis=1)[:,0]
    na_frame2 = frame2.isna().sum().sum(axis=1)[:,0]
    # fraction of rows each per-column comparator will sample; 1 means compare everything.
    # NOTE(review): uses "/" — under Python 2 with integer inputs this truncates to 0; presumably
    # always run under Python 3 or with float numElements — confirm.
    probVal = numElements/rows1 if numElements > 0 else 1
    if compare_NA:      # check number of missing values
        assert na_frame1.flatten() == na_frame2.flatten(), "failed numbers of NA check! Frame 1 NA number: {0}, frame 2 " \
                                                           "NA number: {1}".format(na_frame1.flatten(), na_frame2.flatten())
    # check column types are the same before proceeding to check each row content.
    for col_ind in range(cols1):
        c1_key = frame1.columns[col_ind]
        c2_key = frame2.columns[col_ind]
        c2_type = frame2.types[c2_key]
        c1_type = frame1.types[c1_key]
        print("###### Comparing column: {0} and column type is {1}.".format(col_ind, c1_type))
        if strict:  # every column type must match
            assert c1_type == c2_type, "failed column type check! frame1 col type: {0}, frame2 col type: " \
                                       "{1}".format(c1_type, c2_type)
        else:
            if str(c2_type) == 'enum':  # orc files do not have enum column type.  We convert it here
                frame1[col_ind].asfactor()
        # dispatch the per-column comparison: custom comparator wins, then string/enum,
        # then time (with tol_time), else numeric (with tol_numeric)
        if custom_comparators and c1_key in custom_comparators:
            custom_comparators[c1_key](frame1, frame2, col_ind, rows1, numElements)
        elif (str(c1_type) == 'string') or (str(c1_type) == 'enum'):
            # compare string
            compare_frames_local_onecolumn_NA_string(frame1[col_ind], frame2[col_ind], prob=probVal)
        else:
            if str(c2_type) == 'time':  # compare time columns
                compare_frames_local_onecolumn_NA(frame1[col_ind], frame2[col_ind], prob=probVal, tol=tol_time)
            else:
                compare_frames_local_onecolumn_NA(frame1[col_ind], frame2[col_ind], prob=probVal, tol=tol_numeric)
    # mismatches raise inside the helpers above, so reaching here means the frames agree
    return True
def catch_warnings():
    """
    Return a recording warnings context manager with a clean warning slate.

    Forces RuntimeWarning to always be shown and wipes every loaded module's
    __warningregistry__ so warnings seen earlier are reported again inside
    the returned context.
    """
    import warnings
    warnings.simplefilter("always", RuntimeWarning)
    # clear the "already warned" registry of every loaded module so duplicate
    # warnings are not suppressed inside the returned context
    for module in sys.modules.values():
        if getattr(module, '__warningregistry__', None):
            module.__warningregistry__ = {}
    return warnings.catch_warnings(record=True)
def contains_warning(ws, message):
    """Return True if any recorded warning in ws is a RuntimeWarning whose text contains message."""
    for warning_record in ws:
        if issubclass(warning_record.category, RuntimeWarning) and message in str(warning_record.message):
            return True
    return False
def no_warnings(ws):
    """Return True when the recorded-warnings list ws is empty."""
    return not ws
def expect_warnings(filewithpath, warn_phrase="warn", warn_string_of_interest="warn", number_of_times=1, in_hdfs=False):
    """
    This function will execute a command to run and analyze the print outs of
    running the command.  The goal here is to capture any warnings that we may expect
    out of running those commands.

    :param filewithpath: name of file to be parsed with path
    :param warn_phrase: capture the warning header, sometimes it is warn or userwarn.
    :param warn_string_of_interest: specific warning message string
    :param number_of_times: number of warning lines we are expecting.
    :param in_hdfs: when True, filewithpath is imported directly from HDFS instead of the local test data dirs
    :return: True if the warning was found at least number_of_times, False otherwise
    """
    number_warngings = 0
    buffer = StringIO()     # redirect warning messages to string buffer for later analysis
    sys.stderr = buffer
    frame = None
    if in_hdfs:
        frame = h2o.import_file(filewithpath)
    else:
        frame = h2o.import_file(path=locate(filewithpath))
    sys.stderr = sys.__stderr__   # restore the real stderr now that the import is done
    # NOTE(review): buflist is an internal attribute of the Python 2 StringIO; on Python 3's
    # io.StringIO the attribute access raises and we fall into the except branch below.
    try:        # for python 2.7
        if len(buffer.buflist) > 0:
            for index in range(len(buffer.buflist)):
                print("*** captured warning message: {0}".format(buffer.buflist[index]))
                if (warn_phrase in buffer.buflist[index]) and (warn_string_of_interest in buffer.buflist[index]):
                    number_warngings = number_warngings+1
    except:     # for python 3.
        # on python 3 all captured stderr output is one string, so at most one match is counted
        warns = buffer.getvalue()
        print("*** captured warning message: {0}".format(warns))
        if (warn_phrase in warns) and (warn_string_of_interest in warns):
            number_warngings = number_warngings+1
    print("Number of warnings found: {0} and number of times that warnings should appear {1}.".format(number_warngings,
                                                                                                      number_of_times))
    if number_warngings >= number_of_times:
        return True
    else:
        return False
def compare_frame_summary(frame1_summary, frame2_summary, compareNames=False, compareTypes=False):
    """
    Compare the per-column summaries of two frames and assert that they agree.

    The 'precision' entry is always skipped; 'label' and 'type' are skipped unless
    compareNames / compareTypes are set.  Floats (scalar or inside lists) are compared
    with a 1e-5 absolute tolerance; NaN entries inside lists are skipped.

    :param frame1_summary: list of per-column summary dicts from the first frame
    :param frame2_summary: list of per-column summary dicts from the second frame
    :param compareNames: if True, also compare the column labels
    :param compareTypes: if True, also compare the column types
    :return: None; raises AssertionError on the first mismatch found
    """
    frame1_column_number = len(frame1_summary)
    frame2_column_number = len(frame2_summary)
    assert frame1_column_number == frame2_column_number, \
        "failed column number check! Frame 1 column number: {0}, frame 2 column number: " \
        "{1}".format(frame1_column_number, frame2_column_number)
    for col_index in range(frame1_column_number):   # check summary for each column
        for key_val in list(frame1_summary[col_index]):
            if not compareNames and str(key_val) == 'label':
                continue
            if not compareTypes and str(key_val) == 'type':
                continue
            if str(key_val) == 'precision':     # skip comparing precision
                continue
            val1 = frame1_summary[col_index][key_val]
            val2 = frame2_summary[col_index][key_val]
            _assert_summary_value_equal(val1, val2, col_index, key_val)


def _assert_summary_value_equal(val1, val2, col_index, key_val):
    """Assert one summary entry matches its counterpart; floats use 1e-5 tolerance, NaNs in lists are skipped."""
    err = "failed column summary comparison for column {0} and summary type {1}, frame 1 value is {2}, " \
          "frame 2 value is {3}"
    if isinstance(val1, dict):
        assert val1 == val2, err.format(col_index, str(key_val), val1, val2)
    elif isinstance(val1, list):
        if len(val1) == 0:
            return  # empty list summaries are not compared (matches original behavior)
        if any(isinstance(entry, float) for entry in val1):
            for entry1, entry2 in zip(val1, val2):
                # bug fix: the original wrote not(str(val1[ind] == 'NaN')) which stringified a
                # bool and was always falsy, so float list entries were never actually compared.
                if str(entry1).lower() == 'nan' or str(entry2).lower() == 'nan':
                    continue  # NaN entries are not comparable
                assert abs(entry1 - entry2) < 1e-5, err.format(col_index, str(key_val), entry1, entry2)
        else:
            assert val1 == val2, err.format(col_index, str(key_val), val1, val2)
    elif isinstance(val1, float):
        assert abs(val1 - val2) < 1e-5, err.format(col_index, str(key_val), val1, val2)
    else:
        assert val1 == val2, err.format(col_index, str(key_val), val1, val2)
def cannaryHDFSTest(hdfs_name_node, file_name):
    """
    Probe whether the hive-exec version on the cluster is too old by attempting to
    parse one file off HDFS.

    :param hdfs_name_node: HDFS name node address
    :param file_name: path of the file to parse (with leading slash)
    :return: True when the version is too old (parse fails with "NoSuchFieldError: vector"),
             False otherwise
    """
    url_orc = "hdfs://{0}{1}".format(hdfs_name_node, file_name)
    try:
        tempFrame = h2o.import_file(url_orc)
        h2o.remove(tempFrame)
        print("Your hive-exec version is good. Parsing success for {0}.".format(url_orc))
        return False
    except Exception as e:
        print("Error exception is {0}".format(str(e)))
        # only this specific failure indicates an outdated hive-exec
        return "NoSuchFieldError: vector" in str(e)
def extract_scoring_history_field(aModel, fieldOfInterest, takeFirst=False):
    """
    Pull the list of values of one scoring-history field out of a model.

    :param aModel: H2O model whose scoring history is examined
    :param fieldOfInterest: string name of the scoring-history column to extract
    :param takeFirst: when True, only the value from the first iteration is requested
    :return: list of field values, or None when the field cannot be found
    """
    scoring_history = aModel._model_json["output"]["scoring_history"]
    return extract_from_twoDimTable(scoring_history, fieldOfInterest, takeFirst)
def extract_from_twoDimTable(metricOfInterest, fieldOfInterest, takeFirst=False):
    """
    Extract the list of values of one column from an H2O TwoDimTable (e.g. a scoring history).

    :param metricOfInterest: TwoDimTable with _col_header and cell_values attributes
    :param fieldOfInterest: string name of the column to extract
    :param takeFirst: when True, only grab the value from the first row
    :return: list of field values, or None when the field cannot be found
    """
    allFields = metricOfInterest._col_header
    # bug fix: takeFirst used to be hard-coded to False in this call, so callers
    # asking for only the first row still received every row
    return extract_field_from_twoDimTable(allFields, metricOfInterest.cell_values, fieldOfInterest,
                                          takeFirst=takeFirst)
def extract_field_from_twoDimTable(allFields, cell_values, fieldOfInterest, takeFirst=False):
    """
    Collect one column out of a table given as a header list plus row lists.

    :param allFields: list of column names
    :param cell_values: list of rows, each row a list aligned with allFields
    :param fieldOfInterest: string name of the column to collect
    :param takeFirst: when True, keep only the first row's value
    :return: list of values, or None when fieldOfInterest is not a known column
    """
    if fieldOfInterest not in allFields:
        return None
    fieldIndex = allFields.index(fieldOfInterest)
    if takeFirst:
        # only grab the result from the first row
        return [cell_values[0][fieldIndex]] if cell_values else []
    return [row[fieldIndex] for row in cell_values]
def model_run_time_sorted_by_time(model_list):
    """
    Collect each model's run time in seconds, ordered by when the model was built
    (oldest model first).

    :param model_list: list of models built sequentially
    :return: list of run times in seconds, in build order
    """
    runtimes = [None] * len(model_list)
    for model in model_list:
        build_position = int(model._id.split('_')[-1]) - 1  # model names start at 1
        runtimes[build_position] = model._model_json["output"]["run_time"] / 1000.0
    return runtimes
def model_seed_sorted(model_list):
    """
    Collect the seed used by each model and return the seeds sorted in ascending order.

    NOTE(review): despite the original description, the returned seeds are sorted by VALUE
    (model_seed_list.sort() below), not by build order.

    :param model_list: grid of models; assumed to support both len(model_list) (model count)
        and model_list.models indexing — TODO confirm against the H2O grid object
    :return: list of model seeds sorted ascending
    """
    model_num = len(model_list)
    model_seed_list = [None] * model_num
    for index in range(model_num):
        # scan this model's parameter list for the "seed" entry
        for pIndex in range(len(model_list.models[0]._model_json["parameters"])):
            if model_list.models[index]._model_json["parameters"][pIndex]["name"]=="seed":
                model_seed_list[index]=model_list.models[index]._model_json["parameters"][pIndex]["actual_value"]
                break
    model_seed_list.sort()
    return model_seed_list
def check_ignore_cols_automl(models, names, x, y):
    """
    Verify that every non-StackedEnsemble model listed in models honored the ignored
    columns: each model's actual ignored_columns must equal all frame columns minus
    the response y and the predictors x.

    :param models: H2OFrame column holding model ids (e.g. an AutoML leaderboard column)
    :param names: all column names of the training frame
    :param x: predictor column names
    :param y: response column name
    """
    expected_ignored = set(names) - {y} - set(x)
    # flatten the single-column frame of model ids into a plain list
    model_ids = sum(models.as_data_frame().values.tolist(), [])
    for model_id in model_ids:
        if "StackedEnsemble" in model_id:
            continue  # StackedEnsemble models are skipped by design
        actual_ignored = set(h2o.get_model(model_id).params["ignored_columns"]["actual"])
        assert actual_ignored == expected_ignored, \
            "ignored columns are not honored for model " + model_id
# This method is not changed to local method using as_data_frame because the frame size is too big.
def check_sorted_2_columns(frame1, sorted_column_indices, prob=0.5, ascending=[True, True]):
    """
    Assert that frame1 is sorted on two columns: the first index in sorted_column_indices is the
    primary sort key; the second is only checked on adjacent rows whose primary values are equal.
    Rows are sampled with probability prob; NaN entries are skipped.

    NOTE(review): ascending is indexed by the column index itself (ascending[colInd]), so this
    assumes sorted_column_indices are 0 and 1 — confirm with callers.  Mutable default argument
    for ascending is safe here only because it is never mutated.

    :param frame1: H2OFrame to check (indexed cell-by-cell; too big for as_data_frame)
    :param sorted_column_indices: two column indices, primary key first
    :param prob: probability of actually checking any given adjacent-row pair
    :param ascending: per-column sort direction flags
    """
    for colInd in sorted_column_indices:
        for rowInd in range(0, frame1.nrow-1):
            if (random.uniform(0.0,1.0) < prob):
                if colInd == sorted_column_indices[0]:
                    # primary column: every sampled adjacent pair must be ordered
                    if not(math.isnan(frame1[rowInd, colInd])) and not(math.isnan(frame1[rowInd+1,colInd])):
                        if ascending[colInd]:
                            assert frame1[rowInd,colInd] <= frame1[rowInd+1,colInd], "Wrong sort order: value at row {0}: {1}, value at " \
                                                        "row {2}: {3}".format(rowInd, frame1[rowInd,colInd],
                                                                              rowInd+1, frame1[rowInd+1,colInd])
                        else:
                            assert frame1[rowInd,colInd] >= frame1[rowInd+1,colInd], "Wrong sort order: value at row {0}: {1}, value at " \
                                                        "row {2}: {3}".format(rowInd, frame1[rowInd,colInd],
                                                                              rowInd+1, frame1[rowInd+1,colInd])
                else:   # for second column
                    # secondary column: only ordered when the primary values of the two rows tie
                    if not(math.isnan(frame1[rowInd, sorted_column_indices[0]])) and not(math.isnan(frame1[rowInd+1,sorted_column_indices[0]])):
                        if (frame1[rowInd,sorted_column_indices[0]]==frame1[rowInd+1, sorted_column_indices[0]]):  # meaningful to compare row entries then
                            if not(math.isnan(frame1[rowInd, colInd])) and not(math.isnan(frame1[rowInd+1,colInd])):
                                if ascending[colInd]:
                                    assert frame1[rowInd,colInd] <= frame1[rowInd+1,colInd], "Wrong sort order: value at row {0}: {1}, value at " \
                                                                "row {2}: {3}".format(rowInd, frame1[rowInd,colInd],
                                                                                      rowInd+1, frame1[rowInd+1,colInd])
                                else:
                                    assert frame1[rowInd,colInd] >= frame1[rowInd+1,colInd], "Wrong sort order: value at row {0}: {1}, value at " \
                                                                "row {2}: {3}".format(rowInd, frame1[rowInd,colInd],
                                                                                      rowInd+1, frame1[rowInd+1,colInd])
# This method is not changed to local method using as_data_frame because the frame size is too big.
def check_sorted_1_column(frame1, sorted_column_index, prob=0.5, ascending=True):
    """
    Assert that one column of frame1 is sorted, sampling roughly a prob fraction of rows by
    stepping every int(1/prob) rows.  NaN entries are skipped.

    :param frame1: H2OFrame to check (indexed cell-by-cell; too big for as_data_frame)
    :param sorted_column_index: index of the column expected to be sorted
    :param prob: approximate fraction of adjacent-row pairs to check (must be in (0, 1])
    :param ascending: True for ascending order, False for descending
    """
    totRow = frame1.nrow * prob
    skipRow = int(frame1.nrow/totRow)  # == int(1/prob): stride between checked rows
    for rowInd in range(0, frame1.nrow-1, skipRow):
        if not (math.isnan(frame1[rowInd, sorted_column_index])) and not (
                math.isnan(frame1[rowInd + 1, sorted_column_index])):
            if ascending:
                assert frame1[rowInd, sorted_column_index] <= frame1[
                    rowInd + 1, sorted_column_index], "Wrong sort order: value at row {0}: {1}, value at " \
                                                      "row {2}: {3}".format(rowInd,
                                                                            frame1[rowInd, sorted_column_index],
                                                                            rowInd + 1,
                                                                            frame1[rowInd + 1, sorted_column_index])
            else:
                assert frame1[rowInd, sorted_column_index] >= frame1[
                    rowInd + 1, sorted_column_index], "Wrong sort order: value at row {0}: {1}, value at " \
                                                      "row {2}: {3}".format(rowInd,
                                                                            frame1[rowInd, sorted_column_index],
                                                                            rowInd + 1,
                                                                            frame1[rowInd + 1, sorted_column_index])
def assert_correct_frame_operation(sourceFrame, h2oResultFrame, operString):
    """
    This method checks each element of a numeric H2OFrame and throw an assert error if its value does not
    equal to the same operation carried out by python.

    :param sourceFrame: original H2OFrame.
    :param h2oResultFrame: H2OFrame after operation on original H2OFrame is carried out.
    :param operString: str representing one of 'abs', 'acos', 'acosh', 'asin', 'asinh', 'atan', 'atanh',
     'ceil', 'cos', 'cosh', 'cospi', 'cumprod', 'cumsum', 'digamma', 'exp', 'expm1', 'floor', 'round',
     'sin', 'sign', 'round', 'sinh', 'tan', 'tanh'
    :return: None.
    """
    # each list below routes operString to a different way of building the reference function
    validStrings = ['acos', 'acosh', 'asin', 'asinh', 'atan', 'atanh', 'ceil', 'cos', 'cosh',
                    'exp', 'floor', 'gamma', 'lgamma', 'log', 'log10', 'sin', 'sinh',
                    'sqrt', 'tan', 'tanh', 'trigamma', 'expm1']
    npValidStrings = ['log2', 'sign']
    nativeStrings = ['round', 'abs', 'cumsum']
    multpi = ['cospi', 'sinpi', 'tanpi']
    others = ['log1p', 'signif', 'trigamma', 'digamma', 'cumprod']
    # check for valid operString
    assert operString in validStrings+npValidStrings+nativeStrings+multpi+others, "Illegal operator " \
                                                                "{0} specified.".format(operString)
    result_comp = lambda x:x  # default method
    if operString == "log1p":
        result_comp = lambda x:math.log(x+1)
    elif operString == 'signif':
        result_comp = lambda x:round(x, 7)
    elif operString == 'trigamma':
        result_comp = lambda x:scipy.special.polygamma(1, x)
    elif operString == 'digamma':
        result_comp = lambda x:scipy.special.polygamma(0, x)
    elif operString=='cumprod':
        # cumprod is checked element-wise as factorial of the cell value
        result_comp = lambda x:factorial(x)
        # stringOperations = 'result_val = factorial(sourceFrame[row_ind, col_ind])'
    elif operString in validStrings:
        result_comp = lambda x:getattr(math, operString)(x)
    elif operString in nativeStrings:
        # NOTE(review): relies on __builtins__ being a dict (true when this module is imported,
        # not when run as __main__); also 'cumsum' is not a builtin, so .get returns None and the
        # call would fail for that operator — verify this dispatch path.
        result_comp =lambda x:__builtins__.get(operString)(x)
        stringOperations = 'result_val = '+operString+'(sourceFrame[row_ind, col_ind])'
    elif operString in npValidStrings:
        result_comp = lambda x:getattr(np, operString)(x)
        # stringOperations = 'result_val = np.'+operString+'(sourceFrame[row_ind, col_ind])'
    elif operString in multpi:
        # 'cospi' etc. mean cos(x*pi): strip the trailing "pi" and scale the argument
        result_comp = lambda x:getattr(math, operString.split('p')[0])(x*math.pi)
        #stringOperations = 'result_val = math.'+operString.split('p')[0]+'(sourceFrame[row_ind, col_ind]*math.pi)'
    for col_ind in range(sourceFrame.ncols):
        for row_ind in range(sourceFrame.nrows):
            result_val = result_comp(sourceFrame[row_ind, col_ind])
            assert abs(h2oResultFrame[row_ind, col_ind]-result_val) <= 1e-6, \
                " command {0}({3}) is not working. Expected: {1}. Received: {2}".format(operString, result_val,
                                            h2oResultFrame[row_ind, col_ind], sourceFrame[row_ind, col_ind])
def factorial(n):
"""
Defined my own factorial just in case using python2.5 or less.
:param n:
:return:
"""
if n>0 and n<2:
return 1
if n>=2:
return n*factorial(n-1)
def cumop(items, op, colInd=0):     # take in one column only
    """
    Cumulative application of a binary op down one column of a 2-D indexable.

    :param items: 2-D indexable (e.g. H2OFrame or ndarray) addressed as items[row, colInd]
    :param op: binary function combining the running value with the next element
    :param colInd: index of the column to scan
    :return: list of cumulative results, one per row
    """
    accumulated = []
    for row in range(len(items)):
        current = items[row, colInd]
        accumulated.append(current if row == 0 else op(accumulated[-1], current))
    return accumulated
def compare_string_frames_local(f1, f2, prob=0.5):
    """
    Compare two string H2O frames cell by cell (downloaded locally), matching frame2's
    columns to frame1's by column name.  Rows are sampled with probability prob.

    NOTE(review): rows start at index 1 because as_data_frame(use_pandas=False) puts the
    column names in row 0; the loop bound range(1, f2.nrow) therefore appears to skip the
    last data row — confirm against the off-by-one fixed elsewhere with nrow+1.

    :param f1: first H2OFrame
    :param f2: second H2OFrame (columns may be in a different order)
    :param prob: probability of actually checking any given cell
    """
    temp1 = f1.as_data_frame(use_pandas=False)
    temp2 = f2.as_data_frame(use_pandas=False)
    cname1 = temp1[0]  # header row: column names of each frame
    cname2 = temp2[0]
    assert (f1.nrow==f2.nrow) and (f1.ncol==f2.ncol), "The two frames are of different sizes."
    for colInd in range(f1.ncol):
        name1 = cname1[colInd]
        for rowInd in range(1, f2.nrow):
            if random.uniform(0,1) < prob:
                # look up frame2's column by name so differing column order still compares correctly
                # (note the assert message prints temp2 at colInd, not the matched column)
                assert temp1[rowInd][colInd]==temp2[rowInd][cname2.index(name1)], "Failed frame values check at row {2} and column {3}! " \
                                                            "frame1 value: {0}, frame2 value: " \
                                                            "{1}".format(temp1[rowInd][colInd], temp2[rowInd][colInd], rowInd, colInd)
def check_data_rows(f1, f2, index_list=None, num_rows=10):
    '''
    This method will compare the relationships of the data rows within each frame.  In particular, we are
    interested in the relative direction of each row vector (inner product) and the relative distances.
    No assertions are thrown; the maximum differences are only printed.

    :param f1: first H2OFrame
    :param f2: second H2OFrame with the same row count
    :param index_list: optional list of row indices to compare; random rows are sampled when empty
    :param num_rows: number of random rows to sample when index_list is empty
    :return: None (prints the maximum inner-product and distance differences)
    '''
    # as_matrix() was removed in pandas 1.0; to_numpy() is its replacement
    temp1 = f1.as_data_frame(use_pandas=True).to_numpy()
    temp2 = f2.as_data_frame(use_pandas=True).to_numpy()
    if index_list is None or len(index_list) == 0:  # None default avoids the mutable-default pitfall
        index_list = random.sample(range(f1.nrow), num_rows)
    maxInnerProduct = 0
    maxDistance = 0
    for row_index in range(1, len(index_list)):
        r1 = np.inner(temp1[index_list[row_index-1]], temp1[index_list[row_index]])
        r2 = np.inner(temp2[index_list[row_index-1]], temp2[index_list[row_index]])
        d1 = np.linalg.norm(temp1[index_list[row_index-1]]-temp1[index_list[row_index]])
        d2 = np.linalg.norm(temp2[index_list[row_index-1]]-temp2[index_list[row_index]])
        # relative difference, guarding against a zero denominator (both values zero)
        denom1 = max(abs(r1), abs(r2))
        diff1 = abs(r1-r2) if denom1 == 0 else min(abs(r1-r2), abs(r1-r2)/denom1)
        maxInnerProduct = max(maxInnerProduct, diff1)
        denom2 = max(abs(d1), abs(d2))
        diff2 = abs(d1-d2) if denom2 == 0 else min(abs(d1-d2), abs(d1-d2)/denom2)
        maxDistance = max(maxDistance, diff2)
    print("Maximum inner product different is {0}. Maximum distance difference is "
          "{1}".format(maxInnerProduct, maxDistance))
def compare_data_rows(f1, f2, index_list=None, num_rows=10, tol=1e-3):
    '''
    This method will compare the relationships of the data rows within each frame.  In particular, we are
    interested in the relative direction of each row vector (inner product) and the relative distances.
    An assertion is thrown if they differ beyond the tolerance.

    :param f1: first H2OFrame
    :param f2: second H2OFrame with the same row count
    :param index_list: optional list of row indices to compare; random rows are sampled when empty
    :param num_rows: number of random rows to sample when index_list is empty
    :param tol: maximum allowed relative difference
    :return: None (prints the maximum differences; raises AssertionError beyond tol)
    '''
    # as_matrix() was removed in pandas 1.0; to_numpy() is its replacement
    temp1 = f1.as_data_frame(use_pandas=True).to_numpy()
    temp2 = f2.as_data_frame(use_pandas=True).to_numpy()
    if index_list is None or len(index_list) == 0:  # None default avoids the mutable-default pitfall
        index_list = random.sample(range(f1.nrow), num_rows)
    maxInnerProduct = 0
    maxDistance = 0
    for row_index in range(1, len(index_list)):
        r1 = np.inner(temp1[index_list[row_index-1]], temp1[index_list[row_index]])
        r2 = np.inner(temp2[index_list[row_index-1]], temp2[index_list[row_index]])
        d1 = np.linalg.norm(temp1[index_list[row_index-1]]-temp1[index_list[row_index]])
        d2 = np.linalg.norm(temp2[index_list[row_index-1]]-temp2[index_list[row_index]])
        # relative difference, guarding against a zero denominator (both values zero)
        denom1 = max(abs(r1), abs(r2))
        diff1 = abs(r1-r2) if denom1 == 0 else min(abs(r1-r2), abs(r1-r2)/denom1)
        maxInnerProduct = max(maxInnerProduct, diff1)
        denom2 = max(abs(d1), abs(d2))
        diff2 = abs(d1-d2) if denom2 == 0 else min(abs(d1-d2), abs(d1-d2)/denom2)
        maxDistance = max(maxDistance, diff2)
        assert diff1 < tol, \
            "relationship between data row {0} and data row {1} are different among the two dataframes. Inner " \
            "product from frame 1 is {2}.  Inner product from frame 2 is {3}. The difference between the two is" \
            " {4}".format(index_list[row_index-1], index_list[row_index], r1, r2, diff1)
        assert diff2 < tol, \
            "distance betwee data row {0} and data row {1} are different among the two dataframes. Distance " \
            "between 2 rows from frame 1 is {2}.  Distance between 2 rows from frame 2 is {3}. The difference" \
            " between the two is {4}".format(index_list[row_index-1], index_list[row_index], d1, d2, diff2)
    print("Maximum inner product different is {0}. Maximum distance difference is "
          "{1}".format(maxInnerProduct, maxDistance))
def compute_frame_diff(f1, f2):
    '''
    Sum of absolute element-wise differences between two frames.

    :param f1: first H2OFrame
    :param f2: second H2OFrame of the same shape
    :return: scalar total absolute difference across all cells
    '''
    abs_diff = h2o.H2OFrame.abs(f1 - f2)
    # collapse to a single cell: sum per column, then sum across the row
    return h2o.H2OFrame.sum(h2o.H2OFrame.sum(abs_diff), axis=1)[0, 0]
def compare_frames_local(f1, f2, prob=0.5, tol=1e-6, returnResult=False):
    '''
    Compare two H2O frames column by column and check that they are equal.  uuid columns
    are skipped.  Enum and string columns are compared as strings; everything else is
    compared numerically with tolerance tol.

    This is a cleanup of the original: the enum branch contained two byte-identical
    if/else arms and the string/numeric branches duplicated the returnResult handling,
    all collapsed here without behavior change.

    :param f1: first H2OFrame
    :param f2: second H2OFrame
    :param prob: fraction of rows each per-column helper actually compares
    :param tol: numeric tolerance
    :param returnResult: when True, return True/False instead of relying only on the
        helpers' assertions
    :return: True/False when returnResult is True, otherwise None (mismatches raise)
    '''
    assert (f1.nrow==f2.nrow) and (f1.ncol==f2.ncol), "Frame 1 row {0}, col {1}. Frame 2 row {2}, col {3}. They are " \
                                                      "different.".format(f1.nrow, f1.ncol, f2.nrow, f2.ncol)
    typeDict = f1.types
    frameNames = f1.names
    for colInd in range(f1.ncol):
        col_type = typeDict[frameNames[colInd]]
        if col_type == u'uuid':
            continue  # uuid columns are not compared
        if col_type == u'enum':
            result = compare_frames_local_onecolumn_NA_enum(f1[colInd], f2[colInd], prob=prob, tol=tol,
                                                            returnResult=returnResult)
        elif col_type == u'string':
            result = compare_frames_local_onecolumn_NA_string(f1[colInd], f2[colInd], prob=prob,
                                                              returnResult=returnResult)
        else:
            result = compare_frames_local_onecolumn_NA(f1[colInd], f2[colInd], prob=prob, tol=tol,
                                                       returnResult=returnResult)
        if returnResult and not result:
            return False
    if returnResult:
        return True
def compare_frames_local_svm(f1, f2, prob=0.5, tol=1e-6, returnResult=False):
    '''
    Compare f1 with f2 where f2 was parsed by the svmlight parser.  Cells that are NA in f1
    must be 0.0 in f2; all other cells must match within tol.

    :param f1: normal h2oFrame
    :param f2: h2oFrame parsed from a svmlight parser
    :param prob: unused here; kept for signature compatibility with the sibling compare helpers
    :param tol: numeric tolerance
    :param returnResult: when True, return True/False instead of relying only on asserts
    :return: True/False when returnResult is True, otherwise None
    '''
    assert (f1.nrow==f2.nrow) and (f1.ncol==f2.ncol), "The two frames are of different sizes."
    temp1 = f1.as_data_frame(use_pandas=False)
    temp2 = f2.as_data_frame(use_pandas=False)
    # row 0 of as_data_frame(use_pandas=False) holds the column names, so data rows run
    # from index 1 through f1.nrow inclusive.
    # bug fix: the loop previously stopped at f1.nrow and never checked the last data row
    # (the sibling helpers all use nrow+1).
    for rowInd in range(1, f1.nrow+1):
        for colInd in range(f1.ncol):
            if (len(temp1[rowInd][colInd]))==0:  # encounter NAs
                if returnResult:
                    if (abs(float(temp2[rowInd][colInd]))) > tol:
                        return False
                assert (abs(float(temp2[rowInd][colInd]))) <= tol, \
                    "Expected: 0.0 but received: {0} for row: {1}, col: " \
                    "{2}".format(temp2[rowInd][colInd], rowInd, colInd)
            else:
                if returnResult:
                    if abs(float(temp1[rowInd][colInd])-float(temp2[rowInd][colInd]))>tol:
                        return False
                assert abs(float(temp1[rowInd][colInd])-float(temp2[rowInd][colInd]))<=tol, \
                    "Expected: {1} but received: {0} for row: {2}, col: " \
                    "{3}".format(temp2[rowInd][colInd], temp1[rowInd][colInd], rowInd, colInd)
    if returnResult:
        return True
# frame compare with NAs in column
def compare_frames_local_onecolumn_NA(f1, f2, prob=0.5, tol=1e-6, returnResult=False, oneLessRow=False):
    """
    Compare one numeric (or time) column of two frames cell by cell, treating empty cells
    as NAs that must be NA in both frames.  Values are compared with a relative tolerance.
    Rows are sampled with probability prob.

    :param f1: single-column H2OFrame
    :param f2: single-column H2OFrame
    :param prob: probability of actually checking any given row
    :param tol: relative tolerance (widened to 10 for time columns; see below)
    :param returnResult: when True, return True/False instead of relying only on asserts
    :param oneLessRow: when True, skip the last data row of the comparison
    :return: True/False when returnResult is True, otherwise None
    """
    if (f1.types[f1.names[0]] == u'time'):  # we have to divide by 1000 before converting back and forth between ms and time format
        tol = 10
    temp1 = f1.as_data_frame(use_pandas=False)
    temp2 = f2.as_data_frame(use_pandas=False)
    assert (f1.nrow==f2.nrow) and (f1.ncol==f2.ncol), "The two frames are of different sizes."
    # row 0 of as_data_frame(use_pandas=False) is the header, so data rows are 1..nrow
    if oneLessRow:
        lastF2Row = f2.nrow
    else:
        lastF2Row = f2.nrow+1
    for colInd in range(f1.ncol):
        for rowInd in range(1,lastF2Row):
            if (random.uniform(0,1) < prob):
                if len(temp1[rowInd]) == 0 or len(temp2[rowInd]) == 0:
                    # an empty row list means NA: both frames must agree on it
                    if returnResult:
                        if not(len(temp1[rowInd]) == len(temp2[rowInd])):
                            return False
                    else:
                        assert len(temp1[rowInd]) == len(temp2[rowInd]), "Failed frame values check at row {2} ! " \
                                                                         "frame1 value: {0}, frame2 value: " \
                                                                         "{1}".format(temp1[rowInd], temp2[rowInd], rowInd)
                else:
                    v1 = float(temp1[rowInd][colInd])
                    v2 = float(temp2[rowInd][colInd])
                    # relative difference with a floor of 1.0 on the denominator
                    diff = abs(v1-v2)/max(1.0, abs(v1), abs(v2))
                    if returnResult:
                        if (diff > tol):
                            return False
                    else:
                        assert diff<=tol, "Failed frame values check at row {2} and column {3}! frame1 value: {0}, column name: {4}. frame2 value: " \
                                          "{1}, column name:{5}".format(temp1[rowInd][colInd], temp2[rowInd][colInd], rowInd, colInd, f1.names[0], f2.names[0])
    if returnResult:
        return True
# frame compare with NAs in column
def compare_frames_local_onecolumn_NA_enum(f1, f2, prob=0.5, tol=1e-6, returnResult=False):
    """
    Compare one enum column of two frames cell by cell as strings, treating empty cells
    as NAs that must be NA in both frames.  Rows are sampled with probability prob.

    :param f1: single-column enum H2OFrame
    :param f2: single-column enum H2OFrame
    :param prob: probability of actually checking any given row
    :param tol: unused for enums; kept for signature compatibility with the numeric helper
    :param returnResult: when True, return True/False instead of relying only on asserts
    :return: True/False when returnResult is True, otherwise None
    """
    temp1 = f1.as_data_frame(use_pandas=False)
    temp2 = f2.as_data_frame(use_pandas=False)
    assert (f1.nrow==f2.nrow) and (f1.ncol==f2.ncol), "The two frames are of different sizes."
    # row 0 of as_data_frame(use_pandas=False) is the header, so data rows are 1..nrow
    for colInd in range(f1.ncol):
        for rowInd in range(1,f2.nrow+1):
            if (random.uniform(0,1) < prob):
                if len(temp1[rowInd]) == 0 or len(temp2[rowInd]) == 0:
                    # an empty row list means NA: both frames must agree on it
                    if returnResult:
                        if not(len(temp1[rowInd]) == len(temp2[rowInd])):
                            return False
                    else:
                        assert len(temp1[rowInd]) == len(temp2[rowInd]), "Failed frame values check at row {2} ! " \
                                                                         "frame1 value: {0}, frame2 value: " \
                                                                         "{1}".format(temp1[rowInd], temp2[rowInd], rowInd)
                else:
                    if returnResult:
                        if not(temp1[rowInd][colInd]==temp2[rowInd][colInd]):
                            return False
                    else:
                        assert temp1[rowInd][colInd]==temp2[rowInd][colInd], "Failed frame values check at row {2} and column {3}! frame1 value: {0}, column name: {4}. frame2 value: " \
                                                                             "{1}, column name:{5}".format(temp1[rowInd][colInd], temp2[rowInd][colInd], rowInd, colInd, f1.names[0], f2.names[0])
    if returnResult:
        return True
# frame compare with NAs in column
def compare_frames_local_onecolumn_NA_string(f1, f2, prob=0.5, returnResult=False):
    """
    Compare one string column of two frames cell by cell, treating empty cells as NAs
    that must be NA in both frames.  Rows are sampled with probability prob.

    :param f1: single-column string H2OFrame
    :param f2: single-column string H2OFrame
    :param prob: probability of actually checking any given row
    :param returnResult: when True, return True/False instead of relying only on asserts
    :return: True/False when returnResult is True, otherwise None
    """
    temp1 = f1.as_data_frame(use_pandas=False)
    temp2 = f2.as_data_frame(use_pandas=False)
    assert (f1.nrow==f2.nrow) and (f1.ncol==f2.ncol), "The two frames are of different sizes."
    # row 0 of as_data_frame(use_pandas=False) is the header, so data rows are 1..nrow
    for colInd in range(f1.ncol):
        for rowInd in range(1,f2.nrow+1):
            if (random.uniform(0,1) < prob):
                if len(temp1[rowInd]) == 0 or len(temp2[rowInd]) == 0:
                    # an empty row list means NA: both frames must agree on it
                    if returnResult:
                        if not(len(temp1[rowInd]) == len(temp2[rowInd])):
                            return False
                    else:
                        assert len(temp1[rowInd]) == len(temp2[rowInd]), "Failed frame values check at row {2} ! " \
                                                                         "frame1 value: {0}, frame2 value: " \
                                                                         "{1}".format(temp1[rowInd], temp2[rowInd], rowInd)
                else:
                    if returnResult:
                        if not(temp1[rowInd][colInd]==temp2[rowInd][colInd]):
                            return False
                    else:
                        assert temp1[rowInd][colInd]==temp2[rowInd][colInd], "Failed frame values check at row {2} and column {3}! frame1 value: {0}, column name: {4}. frame2 value: " \
                                                                             "{1}, column name:{5}".format(temp1[rowInd][colInd], temp2[rowInd][colInd], rowInd, colInd, f1.names[0], f2.names[0])
    if returnResult:
        return True
def build_save_model_generic(params, x, train, respName, algoName, tmpdir):
    """Build an H2O model of the requested algorithm, train it, and save its MOJO.

    :param params: dict of constructor parameters for the estimator
    :param x: predictor column names/indices
    :param train: training H2OFrame
    :param respName: response column name
    :param algoName: one of "gam", "glm", "gbm", "drf" (case-insensitive)
    :param tmpdir: directory to which the MOJO is downloaded
    :return: the trained model
    :raises Exception: if algoName is not a supported algorithm
    """
    # Dispatch table instead of an if/elif chain.
    constructors = {
        "gam": H2OGeneralizedAdditiveEstimator,
        "glm": H2OGeneralizedLinearEstimator,
        "gbm": H2OGradientBoostingEstimator,
        "drf": H2ORandomForestEstimator,
    }
    algo = algoName.lower()
    if algo not in constructors:
        raise Exception("build_save_model does not support algo "+algoName+". Please add this to build_save_model.")
    model = constructors[algo](**params)
    model.train(x=x, y=respName, training_frame=train)
    model.download_mojo(path=tmpdir)
    return model
# generate random dataset, copied from Pasha
def random_dataset(response_type, verbose=True, ncol_upper=25000, ncol_lower=15000, NTESTROWS=200, missing_fraction=0.0, seed=None):
    """Create and return a random dataset.

    :param response_type: 'binomial', 'gaussian', or anything else for a
        multinomial response with a random 3-10 levels
    :param verbose: if True, print progress and show the generated frame
    :param ncol_upper: upper bound of the random row count (see NOTE below)
    :param ncol_lower: lower bound of the random row count
    :param NTESTROWS: extra rows added on top of the random row count
    :param missing_fraction: fraction of values left missing
    :param seed: seed for both Python's RNG and h2o.create_frame
    :return: the generated H2OFrame (3-20 columns plus a response column)
    """
    if verbose:
        print("\nCreating a dataset for a %s problem:" % response_type)
    random.seed(seed)
    # Draw a random fraction per column type, then normalize so they sum to 1.
    fractions = {k + "_fraction": random.random() for k in "real categorical integer time string binary".split()}
    fractions["string_fraction"] = 0  # Right now we are dropping string columns, so no point in having them.
    fractions["binary_fraction"] /= 3
    fractions["time_fraction"] /= 2
    sum_fractions = sum(fractions.values())
    for k in fractions:
        fractions[k] /= sum_fractions
    if response_type == 'binomial':
        response_factors = 2
    elif response_type == 'gaussian':
        response_factors = 1
    else:
        response_factors = random.randint(3, 10)
    # NOTE(review): ncol_upper/ncol_lower bound the ROW count here while the
    # column count is fixed at randint(3, 20) — the parameter names look
    # swapped; confirm intent before renaming.
    df = h2o.create_frame(rows=random.randint(ncol_lower, ncol_upper) + NTESTROWS, cols=random.randint(3, 20),
                          missing_fraction=missing_fraction,
                          has_response=True, response_factors=response_factors, positive_response=True, factors=10,
                          seed=seed, **fractions)
    if verbose:
        print()
        df.show()
    return df
# generate random dataset of ncolumns of Strings, copied from Pasha
def random_dataset_strings_only(nrow, ncol, seed=None):
    """Create and return a random dataset made up exclusively of string columns.

    :param nrow: number of rows
    :param ncol: number of columns
    :param seed: seed forwarded to h2o.create_frame
    :return: H2OFrame of string columns, no missing values, no response
    """
    # Every type fraction is zero except strings, so all columns are strings.
    fractions = {
        "real_fraction": 0,
        "categorical_fraction": 0,
        "integer_fraction": 0,
        "time_fraction": 0,
        "string_fraction": 1,
        "binary_fraction": 0,
    }
    return h2o.create_frame(rows=nrow, cols=ncol, missing_fraction=0, has_response=False, seed=seed, **fractions)
def random_dataset_all_types(nrow, ncol, seed=None):
    """Create and return a random dataset containing a mix of all column types.

    :param nrow: number of rows
    :param ncol: number of columns
    :param seed: seed forwarded to h2o.create_frame
    :return: H2OFrame with ~16% real/categorical/integer/binary/time columns
        and 20% string columns, 10% missing values, no response
    """
    # BUG FIX: the original built this dict with trailing commas (wrapping every
    # value in a 1-tuple) and then never passed it to create_frame, so the
    # requested type fractions were silently ignored.
    fractions = dict()
    fractions['real_fraction'] = 0.16
    fractions['categorical_fraction'] = 0.16
    fractions['integer_fraction'] = 0.16
    fractions['binary_fraction'] = 0.16
    fractions['time_fraction'] = 0.16
    fractions['string_fraction'] = 0.2
    return h2o.create_frame(rows=nrow, cols=ncol, missing_fraction=0.1, has_response=False, seed=seed, **fractions)
# generate random dataset of ncolumns of enums only, copied from Pasha
def random_dataset_enums_only(nrow, ncol, factorL=10, misFrac=0.01, randSeed=None):
    """Create and return a random dataset made up exclusively of enum columns.

    :param nrow: number of rows
    :param ncol: number of columns
    :param factorL: number of factor levels per enum column
    :param misFrac: fraction of missing values
    :param randSeed: seed forwarded to h2o.create_frame
    :return: H2OFrame of categorical columns, no response
    """
    # Only the categorical fraction is non-zero, so every column is an enum.
    fractions = {
        "real_fraction": 0,
        "categorical_fraction": 1,
        "integer_fraction": 0,
        "time_fraction": 0,
        "string_fraction": 0,
        "binary_fraction": 0,
    }
    return h2o.create_frame(rows=nrow, cols=ncol, missing_fraction=misFrac, has_response=False, factors=factorL,
                            seed=randSeed, **fractions)
# generate random dataset of ncolumns of enums only, copied from Pasha
def random_dataset_int_only(nrow, ncol, rangeR=10, misFrac=0.01, randSeed=None):
    """Create and return a random dataset made up exclusively of integer columns.

    :param nrow: number of rows
    :param ncol: number of columns
    :param rangeR: integer_range forwarded to h2o.create_frame
    :param misFrac: fraction of missing values
    :param randSeed: seed forwarded to h2o.create_frame
    :return: H2OFrame of integer columns, no response
    """
    # Only the integer fraction is non-zero, so every column is an integer.
    fractions = {
        "real_fraction": 0,
        "categorical_fraction": 0,
        "integer_fraction": 1,
        "time_fraction": 0,
        "string_fraction": 0,
        "binary_fraction": 0,
    }
    return h2o.create_frame(rows=nrow, cols=ncol, missing_fraction=misFrac, has_response=False, integer_range=rangeR,
                            seed=randSeed, **fractions)
# generate random dataset of ncolumns of integer and reals, copied from Pasha
def random_dataset_numeric_only(nrow, ncol, integerR=100, misFrac=0.01, randSeed=None):
    """Create and return a random dataset of numeric (real and integer) columns only.

    Roughly one quarter of the columns are real-valued; the rest are integers
    generated with integer_range=integerR.

    :param nrow: number of rows
    :param ncol: number of columns
    :param integerR: integer_range forwarded to h2o.create_frame
    :param misFrac: fraction of missing values
    :param randSeed: seed forwarded to h2o.create_frame
    :return: H2OFrame of numeric columns, no response
    """
    fractions = {
        "real_fraction": 0.25,
        "categorical_fraction": 0,
        "integer_fraction": 0.75,
        "time_fraction": 0,
        "string_fraction": 0,
        "binary_fraction": 0,
    }
    return h2o.create_frame(rows=nrow, cols=ncol, missing_fraction=misFrac, has_response=False, integer_range=integerR,
                            seed=randSeed, **fractions)
# generate random dataset of ncolumns of integer and reals, copied from Pasha
def random_dataset_real_only(nrow, ncol, realR=100, misFrac=0.01, randSeed=None):
    """Create and return a random dataset made up exclusively of real-valued columns.

    :param nrow: number of rows
    :param ncol: number of columns
    :param realR: value range forwarded to h2o.create_frame (see NOTE below)
    :param misFrac: fraction of missing values
    :param randSeed: seed forwarded to h2o.create_frame
    :return: H2OFrame of real columns, no response
    """
    fractions = dict()
    fractions["real_fraction"] = 1  # every generated column is real-valued
    fractions["categorical_fraction"] = 0
    fractions["integer_fraction"] = 0
    fractions["time_fraction"] = 0
    fractions["string_fraction"] = 0  # Right now we are dropping string columns, so no point in having them.
    fractions["binary_fraction"] = 0
    # NOTE(review): realR is passed as integer_range even though the frame is
    # 100% real columns; presumably real_range was intended — confirm against
    # h2o.create_frame's signature before changing.
    df = h2o.create_frame(rows=nrow, cols=ncol, missing_fraction=misFrac, has_response=False, integer_range=realR,
                          seed=randSeed, **fractions)
    return df
def getMojoName(modelID):
    """Sanitize a model id into a MOJO-friendly name by replacing every
    punctuation/space character with an underscore."""
    bad_chars = re.compile("[+\\-* !@#$%^&()={}\\[\\]|;:'\"<>,.?/]")
    return bad_chars.sub("_", modelID)
def convertH2OFrameToDMatrix(h2oFrame, yresp, enumCols=[]):
    """
    Convert an H2OFrame into a (dense) DMatrix usable by native XGBoost.  The
    H2OFrame can contain numerical and enum columns.  Note that H2O
    one-hot-encoding introduces a missing(NA) column.  There can be NAs in any
    column.

    :param h2oFrame: H2OFrame to be converted to DMatrix
    :param yresp: string denoting the response column name
    :param enumCols: list of enum column names in the H2OFrame
        (NOTE(review): mutable default argument — never mutated here, but
        consider defaulting to None)
    :return: DMatrix
    """
    import xgboost as xgb
    pandas = __convertH2OFrameToPandas__(h2oFrame, yresp, enumCols);
    return xgb.DMatrix(data=pandas[0], label=pandas[1])
def convertH2OFrameToDMatrixSparse(h2oFrame, yresp, enumCols=[]):
    """
    Convert an H2OFrame into a sparse (scipy CSR-backed) DMatrix usable by
    native XGBoost.  The H2OFrame can contain numerical and enum columns.
    Note that H2O one-hot-encoding introduces a missing(NA) column.  There can
    be NAs in any column.

    :param h2oFrame: H2OFrame to be converted to DMatrix
    :param yresp: string denoting the response column name
    :param enumCols: list of enum column names in the H2OFrame
    :return: DMatrix backed by a csr_matrix
    """
    import xgboost as xgb
    pandas = __convertH2OFrameToPandas__(h2oFrame, yresp, enumCols);
    return xgb.DMatrix(data=csr_matrix(pandas[0]), label=pandas[1])
def __convertH2OFrameToPandas__(h2oFrame, yresp, enumCols=[]):
    """
    Convert an H2OFrame into a (data, label) pair of numpy arrays suitable for
    feeding a native XGBoost DMatrix.  Enum columns are one-hot-encoded
    H2O-style (including an extra missing(NA) column); the response column is
    converted to numeric and returned separately as the label.

    :param h2oFrame: H2OFrame to be converted
    :param yresp: string denoting the response column name
    :param enumCols: list of enum column names in the H2OFrame
    :return: tuple (data, label) of numpy arrays
    """
    pandaFtrain = h2oFrame.as_data_frame(use_pandas=True, header=True)
    nrows = h2oFrame.nrow
    if len(enumCols) > 0:  # one-hot-encode the enum columns first
        pandaTrainPart = generatePandaEnumCols(pandaFtrain, enumCols[0], nrows)
        pandaFtrain.drop([enumCols[0]], axis=1, inplace=True)
        for colInd in range(1, len(enumCols)):
            cname = enumCols[colInd]
            ctemp = generatePandaEnumCols(pandaFtrain, cname, nrows)
            pandaTrainPart = pd.concat([pandaTrainPart, ctemp], axis=1)
            pandaFtrain.drop([cname], axis=1, inplace=True)
        pandaFtrain = pd.concat([pandaTrainPart, pandaFtrain], axis=1)
    # Convert the response to numeric and move it to the front.
    c0 = h2oFrame[yresp].asnumeric().as_data_frame(use_pandas=True, header=True)
    pandaFtrain.drop([yresp], axis=1, inplace=True)
    pandaF = pd.concat([c0, pandaFtrain], axis=1)
    pandaF.rename(columns={c0.columns[0]: yresp}, inplace=True)
    newX = list(pandaFtrain.columns.values)
    # BUG FIX: DataFrame.as_matrix() was removed in pandas 1.0 — select the
    # columns and take .values instead (identical result).
    data = pandaF[newX].values
    label = pandaF[[yresp]].values
    return (data, label)
def generatePandaEnumCols(pandaFtrain, cname, nrows):
    """
    One-hot-encode the pandas column *cname* H2O-style and append an extra
    "<cname>.missing(NA)" indicator column that is 1 wherever the original
    value was NaN.

    :param pandaFtrain: pandas DataFrame holding the column to encode
    :param cname: name of the (categorical) column to encode
    :param nrows: number of rows of pandaFtrain
    :return: pandas DataFrame holding the one-hot columns plus the missing(NA) column
    """
    cmissingNames=[cname+".missing(NA)"]
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; it was
    # merely an alias for the builtin int.
    tempnp = np.zeros((nrows,1), dtype=int)
    # check for nan and assign it correct value
    colVals = pandaFtrain[cname]
    for ind in range(nrows):
        try:
            float(colVals[ind])  # non-numeric strings raise ValueError and cannot be NaN
            if math.isnan(colVals[ind]):
                tempnp[ind]=1
        except ValueError:
            pass
    zeroFrame = pd.DataFrame(tempnp)
    zeroFrame.columns=cmissingNames
    temp = pd.get_dummies(pandaFtrain[cname], prefix=cname, drop_first=False)
    tempNames = list(temp)  # get column names
    colLength = len(tempNames)
    newNames = ['a']*colLength
    newIndics = [0]*colLength
    if "." in tempNames[0]:
        # H2O enum levels look like "c<k>.l<n>": re-sort the dummy columns by
        # the numeric level index so they follow H2O's level order.
        header = tempNames[0].split('.')[0]
        for ind in range(colLength):
            newIndics[ind] = int(tempNames[ind].split('.')[1][1:])
        newIndics.sort()
        for ind in range(colLength):
            newNames[ind] = header+'.l'+str(newIndics[ind])  # generate correct order of names
        ftemp = temp[newNames]
    else:
        ftemp = temp
    ctemp = pd.concat([ftemp, zeroFrame], axis=1)
    return ctemp
def summarizeResult_binomial(h2oPredictD, nativePred, h2oTrainTimeD, nativeTrainTime, h2oPredictTimeD,
                             nativeScoreTime, tolerance=1e-6):
    '''
    Summarize and compare H2OXGBoost and native XGBoost results for binomial
    classifiers: print the timing figures, then assert the per-row class-1
    probabilities agree within *tolerance*.

    :param h2oPredictD: H2O prediction frame (predict, p0, p1 columns)
    :param nativePred: native XGBoost per-row class-1 probabilities
    :param h2oTrainTimeD: H2O training time in milliseconds
    :param nativeTrainTime: native XGBoost training time in seconds
    :param h2oPredictTimeD: H2O scoring time in seconds
    :param nativeScoreTime: native XGBoost scoring time in seconds
    :param tolerance: maximum allowed absolute probability difference
    :return: None (raises AssertionError on mismatch)
    '''
    # Result comparison in terms of time
    print("H2OXGBoost train time is {0}s. Native XGBoost train time is {1}s.\n H2OXGBoost scoring time is {2}s."
          " Native XGBoost scoring time is {3}s.".format(h2oTrainTimeD/1000.0, nativeTrainTime,
                                                         h2oPredictTimeD, nativeScoreTime))
    # Result comparison in terms of actual prediction value between the two
    colnames = h2oPredictD.names
    h2oPredictD['predict'] = h2oPredictD['predict'].asnumeric()
    h2oPredictLocalD = h2oPredictD.as_data_frame(use_pandas=True, header=True)
    # compare prediction probability and they should agree if they use the same seed
    # (column index 2 holds the class-1 probability)
    for ind in range(h2oPredictD.nrow):
        assert abs(h2oPredictLocalD[colnames[2]][ind]-nativePred[ind])<tolerance, "H2O prediction prob: {0} and native " \
                                                                        "XGBoost prediction prob: {1}. They are " \
                                                                        "very different.".format(h2oPredictLocalD[colnames[2]][ind], nativePred[ind])
def summarize_metrics_binomial(h2o_metrics, xgboost_metrics, names, tolerance=1e-4):
    """Print each H2O metric next to its native XGBoost counterpart and assert
    they agree within *tolerance* (lists are parallel, named by *names*)."""
    for i, h2o_val in enumerate(h2o_metrics):
        xgb_val = xgboost_metrics[i]
        metric_name = names[i]
        difference = abs(h2o_val - xgb_val)
        print("H2O {0} metric: {1} and native " \
              "XGBoost {0} metric: {2}. " \
              "Difference is {3}".format(metric_name, h2o_val, xgb_val, difference))
        assert difference < tolerance, "H2O {0} metric: {1} and native " \
                                       "XGBoost {0} metric: {2}. They are " \
                                       "very different.".format(metric_name, h2o_val, xgb_val)
def summarizeResult_multinomial(h2oPredictD, nativePred, h2oTrainTimeD, nativeTrainTime, h2oPredictTimeD,
                                nativeScoreTime, tolerance=1e-6):
    '''
    Summarize and compare H2OXGBoost and native XGBoost results for multinomial
    classifiers: print the timing figures, then assert every per-class
    probability agrees within *tolerance*.

    :param h2oPredictD: H2O prediction frame (predict column, then one probability column per class)
    :param nativePred: native XGBoost predictions, one per-class probability list per row
    :param h2oTrainTimeD: H2O training time in milliseconds
    :param nativeTrainTime: native XGBoost training time in seconds
    :param h2oPredictTimeD: H2O scoring time in seconds
    :param nativeScoreTime: native XGBoost scoring time in seconds
    :param tolerance: maximum allowed absolute probability difference
    :return: None (raises AssertionError on mismatch)
    '''
    # Result comparison in terms of time
    print("H2OXGBoost train time is {0}s. Native XGBoost train time is {1}s.\n H2OGBoost scoring time is {2}s."
          " Native XGBoost scoring time is {3}s.".format(h2oTrainTimeD/1000.0, nativeTrainTime,
                                                         h2oPredictTimeD, nativeScoreTime))
    # Result comparison in terms of actual prediction value between the two
    h2oPredictD['predict'] = h2oPredictD['predict'].asnumeric()
    h2oPredictLocalD = h2oPredictD.as_data_frame(use_pandas=True, header=True)
    nclass = len(nativePred[0])
    colnames = h2oPredictD.names
    # compare prediction probability and they should agree if they use the same seed
    # (column 0 is the predicted label; class probabilities start at column 1)
    for ind in range(h2oPredictD.nrow):
        for col in range(nclass):
            assert abs(h2oPredictLocalD[colnames[col+1]][ind]-nativePred[ind][col])<tolerance, \
                "H2O prediction prob: {0} and native XGBoost prediction prob: {1}. They are very " \
                "different.".format(h2oPredictLocalD[colnames[col+1]][ind], nativePred[ind][col])
def genTrainFrame(nrow, ncol, enumCols=0, enumFactors=2, responseLevel=2, miscfrac=0, randseed=None):
    """
    Generate a random training frame with *ncol* numeric columns and/or
    *enumCols* enum columns plus an enum 'response' column.

    :param nrow: number of rows
    :param ncol: number of numeric predictor columns (0 for none)
    :param enumCols: number of enum predictor columns (0 for none)
    :param enumFactors: factor levels per enum predictor
    :param responseLevel: factor levels of the response column
    :param miscfrac: fraction of missing values in the predictors
    :param randseed: seed for reproducibility
    :return: H2OFrame of predictors cbind-ed with the 'response' column
    :raises ValueError: if both ncol and enumCols are <= 0
    """
    # BUG FIX: with ncol <= 0 and enumCols <= 0 the original fell through to an
    # unbound local (NameError); fail with an explicit message instead.
    if ncol <= 0 and enumCols <= 0:
        raise ValueError("genTrainFrame requires ncol > 0 and/or enumCols > 0.")
    if ncol > 0:
        trainFrameNumerics = random_dataset_numeric_only(nrow, ncol, integerR=1000000, misFrac=miscfrac, randSeed=randseed)
    if enumCols > 0:
        trainFrameEnums = random_dataset_enums_only(nrow, enumCols, factorL=enumFactors, misFrac=miscfrac, randSeed=randseed)
    # Response column is always an enum with no missing values.
    yresponse = random_dataset_enums_only(nrow, 1, factorL=responseLevel, misFrac=0, randSeed=randseed)
    yresponse.set_name(0, 'response')
    if enumCols > 0:
        if ncol > 0:  # mixed dataset: enums, then numerics, then response
            trainFrame = trainFrameEnums.cbind(trainFrameNumerics.cbind(yresponse))
        else:  # enum-only dataset
            trainFrame = trainFrameEnums.cbind(yresponse)
    else:  # numeric-only dataset
        trainFrame = trainFrameNumerics.cbind(yresponse)
    return trainFrame
def check_xgb_var_imp(h2o_train, h2o_model, xgb_train, xgb_model, tolerance=1e-6):
    """
    Assert that H2O XGBoost variable importances match native XGBoost "gain"
    importances.  H2O reports total gain, so each H2O importance is divided by
    its split frequency before being compared (relative tolerance) with
    XGBoost's average gain per split.

    :param h2o_train: H2O training frame (supplies the column names)
    :param h2o_model: trained H2O XGBoost model
    :param xgb_train: native DMatrix (supplies the feature names)
    :param xgb_model: trained native XGBoost booster
    :param tolerance: relative tolerance for the comparison
    :return: None (raises AssertionError on mismatch)
    """
    # Map H2O column names onto the native booster's feature names.
    column_map = dict(zip(h2o_train.names, xgb_train.feature_names))
    h2o_var_imps = h2o_model.varimp()
    h2o_var_frequencies = h2o_model._model_json["output"]["variable_importances_frequency"].cell_values
    freq_map = dict(map(lambda t: (t[0], t[1]), h2o_var_frequencies))
    # XGBoost reports average gain of a split
    xgb_var_imps = xgb_model.get_score(importance_type="gain")
    for h2o_var_imp in h2o_var_imps:
        frequency = freq_map[h2o_var_imp[0]]
        xgb_var_imp = xgb_var_imps[column_map[h2o_var_imp[0]]]
        # Relative difference with a floor of 1 on the normalizer.
        abs_diff = abs(h2o_var_imp[1]/frequency - xgb_var_imp)
        norm = max(1, abs(h2o_var_imp[1]/frequency), abs(xgb_var_imp))
        assert abs_diff/norm < tolerance, "Variable importance of feature {0} is different. H2O: {1}, XGB {2}"\
            .format(h2o_var_imp[0], h2o_var_imp[1], xgb_var_imp)
def summarizeResult_regression(h2oPredictD, nativePred, h2oTrainTimeD, nativeTrainTime, h2oPredictTimeD, nativeScoreTime, tolerance=1e-6):
    '''
    Summarize and compare H2OXGBoost and native XGBoost results for
    regression: print the timing figures, then assert the predictions agree
    within a relative *tolerance*.

    :param h2oPredictD: H2O prediction frame (predict column)
    :param nativePred: native XGBoost per-row predictions
    :param h2oTrainTimeD: H2O training time in milliseconds
    :param nativeTrainTime: native XGBoost training time in seconds
    :param h2oPredictTimeD: H2O scoring time in seconds
    :param nativeScoreTime: native XGBoost scoring time in seconds
    :param tolerance: maximum allowed relative prediction difference
    :return: None (raises AssertionError on mismatch)
    '''
    # Result comparison in terms of time
    print("H2OXGBoost train time is {0}ms. Native XGBoost train time is {1}s.\n H2OGBoost scoring time is {2}s."
          " Native XGBoost scoring time is {3}s.".format(h2oTrainTimeD, nativeTrainTime,
                                                         h2oPredictTimeD, nativeScoreTime))
    # Result comparison in terms of actual prediction value between the two
    h2oPredictD['predict'] = h2oPredictD['predict'].asnumeric()
    h2oPredictLocalD = h2oPredictD.as_data_frame(use_pandas=True, header=True)
    # compare prediction probability and they should agree if they use the same seed
    # (relative difference, with a floor of 1 on the normalizer)
    for ind in range(h2oPredictD.nrow):
        assert abs((h2oPredictLocalD['predict'][ind]-nativePred[ind])/max(1, abs(h2oPredictLocalD['predict'][ind]), abs(nativePred[ind])))<tolerance, \
            "H2O prediction: {0} and native XGBoost prediction: {1}. They are very " \
            "different.".format(h2oPredictLocalD['predict'][ind], nativePred[ind])
def summarizeResult_binomial_DS(h2oPredictD, nativePred, h2oTrainTimeD, nativeTrainTime, h2oPredictTimeD,
                                nativeScoreTime, h2oPredictS, tolerance=1e-6):
    '''
    Summarize and compare H2OXGBoost (dense and sparse) against native XGBoost
    for binomial classifiers.  A row passes if EITHER the dense or the sparse
    H2O class-1 probability ('c0.l1' column) matches the native prediction
    within *tolerance*.

    :param h2oPredictD: H2O prediction frame built from the dense DMatrix path
    :param nativePred: native XGBoost per-row class-1 probabilities
    :param h2oTrainTimeD: H2O training time in milliseconds
    :param nativeTrainTime: native XGBoost training time in seconds
    :param h2oPredictTimeD: H2O scoring time in seconds
    :param nativeScoreTime: native XGBoost scoring time in seconds
    :param h2oPredictS: H2O prediction frame built from the sparse DMatrix path
    :param tolerance: maximum allowed absolute probability difference
    :return: None (raises AssertionError on mismatch)
    '''
    # Result comparison in terms of time
    print("H2OXGBoost train time with sparse DMatrix is {0}s. Native XGBoost train time with dense DMtraix is {1}s.\n H2OGBoost scoring time is {2}s."
          " Native XGBoost scoring time with dense DMatrix is {3}s.".format(h2oTrainTimeD/1000.0, nativeTrainTime,
                                                         h2oPredictTimeD, nativeScoreTime))
    # Result comparison in terms of actual prediction value between the two
    h2oPredictD['predict'] = h2oPredictD['predict'].asnumeric()
    h2oPredictLocalD = h2oPredictD.as_data_frame(use_pandas=True, header=True)
    h2oPredictS['predict'] = h2oPredictS['predict'].asnumeric()
    h2oPredictLocalS = h2oPredictS.as_data_frame(use_pandas=True, header=True)
    # compare prediction probability and they should agree if they use the same seed
    for ind in range(h2oPredictD.nrow):
        assert abs(h2oPredictLocalD['c0.l1'][ind]-nativePred[ind])<tolerance or \
               abs(h2oPredictLocalS['c0.l1'][ind]-nativePred[ind])<tolerance, \
            "H2O prediction prob: {0} and native XGBoost prediction prob: {1}. They are very " \
            "different.".format(h2oPredictLocalD['c0.l1'][ind], nativePred[ind])
def compare_weightedStats(model, dataframe, xlist, xname, weightV, pdpTDTable, tol=1e-6):
    '''
    This method is used to test the partial dependency plots and is not meant for any other functions.
    It manually computes the weighted mean / stddev / stderr of the model
    predictions at each value in *xlist* and asserts they equal the
    corresponding columns of the partial-dependence TwoDimTable within *tol*.

    :param model: trained H2O model
    :param dataframe: frame over which the pdp was computed
    :param xlist: predictor values at which the pdp was evaluated
    :param xname: name of the predictor column
    :param weightV: per-row weights (indexable as weightV[row][0])
    :param pdpTDTable: H2OTwoDimTable produced by partial_plot
    :param tol: tolerance forwarded to equal_two_arrays
    :return: None (equal_two_arrays raises on mismatch)
    '''
    weightStat = manual_partial_dependence(model, dataframe, xlist, xname, weightV) # calculate theoretical weighted sts
    wMean = extract_col_value_H2OTwoDimTable(pdpTDTable, "mean_response") # stats for age predictor
    wStd = extract_col_value_H2OTwoDimTable(pdpTDTable, "stddev_response")
    wStdErr = extract_col_value_H2OTwoDimTable(pdpTDTable, "std_error_mean_response")
    equal_two_arrays(weightStat[0], wMean, tol, tol, throw_error=True)
    equal_two_arrays(weightStat[1], wStd, tol, tol, throw_error=True)
    equal_two_arrays(weightStat[2], wStdErr, tol, tol, throw_error=True)
def manual_partial_dependence(model, dataframe, xlist, xname, weightV):
    """
    Manually compute weighted partial-dependence statistics for predictor
    *xname*: for each value in *xlist* the predictor column is replaced by that
    constant, the model is re-scored, and the weighted mean / std / std-error
    of the predictions is collected.

    :param model: trained H2O model
    :param dataframe: frame to score (must contain xname initially)
    :param xlist: list of predictor values at which to evaluate the pdp
    :param xname: name of the predictor column
    :param weightV: per-row weights, indexable as weightV[row][0]
    :return: tuple of lists (means, stds, stderrs), parallel to xlist
    """
    meanV = []
    stdV = []
    stderrV = []
    nRows = dataframe.nrow
    nCols = dataframe.ncol-1
    for xval in xlist:
        cons = [xval]*nRows
        if xname in dataframe.names:
            dataframe=dataframe.drop(xname)
        # An 'NA' / NaN pdp value is represented by leaving the predictor
        # column out of the scored frame entirely.
        if not((is_type(xval, str) and xval=='NA') or (isinstance(xval, float) and math.isnan(xval))):
            dataframe = dataframe.cbind(h2o.H2OFrame(cons))
            dataframe.set_name(nCols, xname)
        pred = model.predict(dataframe).as_data_frame(use_pandas=False, header=False)
        # Last prediction column holds the value of interest (e.g. p1 or predict).
        pIndex = len(pred[0])-1
        sumEle = 0.0
        sumEleSq = 0.0
        sumWeight = 0.0
        numNonZeroWeightCount = 0.0
        m = 1.0/math.sqrt(dataframe.nrow*1.0)
        for rindex in range(len(pred)):
            val = float(pred[rindex][pIndex]);
            weight = float(weightV[rindex][0])
            # Only rows with non-zero weight and a valid prediction contribute.
            if (abs(weight) > 0) and isinstance(val, float) and not(math.isnan(val)):
                temp = val*weight
                sumEle = sumEle+temp
                sumEleSq = sumEleSq+temp*val
                sumWeight = sumWeight+weight
                numNonZeroWeightCount = numNonZeroWeightCount+1
        wMean = sumEle/sumWeight
        # Bessel-style correction based on the count of contributing rows.
        scale = numNonZeroWeightCount*1.0/(numNonZeroWeightCount-1)
        wSTD = math.sqrt((sumEleSq/sumWeight-wMean*wMean)*scale)
        meanV.append(wMean)
        stdV.append(wSTD)
        stderrV.append(wSTD*m)
    return meanV, stdV, stderrV
def compare_frames_equal_names(frame1, frame2):
    '''
    Compare two frames that share the same column names and column types.
    The currently accepted column types are enum, int and string.

    :param frame1: first H2OFrame
    :param frame2: second H2OFrame
    :return: None (the per-column comparison helpers raise on mismatch)
    '''
    type_by_name = frame1.types
    for col_name in frame1.names:
        col_type = str(type_by_name[col_name])
        # Dispatch to the type-appropriate single-column comparison.
        if col_type == "enum":
            compare_frames_local_onecolumn_NA_enum(frame1[col_name], frame2[col_name], prob=1, tol=0)
        elif col_type == 'string':
            compare_frames_local_onecolumn_NA_string(frame1[col_name], frame2[col_name], prob=1)
        else:
            compare_frames_local_onecolumn_NA(frame1[col_name], frame2[col_name], prob=1, tol=1e-10)
def write_H2OFrame_2_SVMLight(filename, h2oFrame):
    '''
    Write an H2OFrame to *filename* in svmlight format.  Only real/integer
    column types are supported; column 0 is taken to be the response.

    :param filename: path of the file to write
    :param h2oFrame: frame to export
    :return: None
    '''
    ncol = h2oFrame.ncol
    nrow = h2oFrame.nrow
    fdataframe = h2oFrame.as_data_frame(use_pandas=False)
    # BUG FIX: use a context manager so the file handle is closed even when a
    # row fails to write (the original leaked the handle on exception).
    with open(filename, 'w') as fwriteFile:
        for rowindex in range(1, nrow+1):
            if len(fdataframe[rowindex][0])==0: # special treatment for response column
                writeWords = "" # convert na response to 0.0
            else:
                writeWords = fdataframe[rowindex][0]
            # Emit "index:value" pairs, skipping NA (empty) cells.
            for colindex in range(1, ncol):
                if not(len(fdataframe[rowindex][colindex])==0):
                    writeWords = writeWords + " "+str(colindex) + ":"+fdataframe[rowindex][colindex]
            fwriteFile.write(writeWords)
            fwriteFile.write('\n')
def write_H2OFrame_2_ARFF(filenameWithPath, filename, h2oFrame, uuidVecs, uuidNames):
    '''
    Write an H2OFrame plus extra UUID vectors to a text file in ARFF format.

    :param filenameWithPath: full path of the output file
    :param filename: relation name written into the ARFF header
    :param h2oFrame: frame providing the regular (non-uuid) columns
    :param uuidVecs: non-empty list of uuid vectors, indexable as vec[row]
    :param uuidNames: list of names for the uuid columns
    :return: None
    '''
    nrow = h2oFrame.nrow
    typesDict = h2oFrame.types
    colnames = h2oFrame.names
    # BUG FIX: use a context manager so the handle is closed even on a write
    # failure (the original leaked it, and also built an unused uuidtypes list).
    with open(filenameWithPath, 'w') as fwriteFile:
        # --- ARFF header ---
        fwriteFile.write("@RELATION "+filename+'\n\n')
        for cname in colnames:
            writeWords = "@ATTRIBUTE "+cname
            # Translate H2O types to ARFF attribute types.
            if typesDict[cname]==u'int':
                writeWords = writeWords + " integer"
            elif typesDict[cname]==u'time':
                writeWords = writeWords + " date"
            else:
                writeWords = writeWords + " "+typesDict[cname]
            fwriteFile.write(writeWords)
            fwriteFile.write('\n')
        for cindex in range(len(uuidNames)):
            fwriteFile.write("@ATTRIBUTE " +uuidNames[cindex]+" uuid")
            fwriteFile.write('\n')
        fwriteFile.write("\n@DATA\n")
        # --- ARFF body, written as csv rows ---
        fdataframe = h2oFrame.as_data_frame(use_pandas=False)
        for rowindex in range(1,nrow+1):
            writeWords = ""
            for cindex in range(h2oFrame.ncol):
                if len(fdataframe[rowindex][cindex])>0:
                    if typesDict[colnames[cindex]]==u'time':
                        # time cells arrive as epoch millis; render as datetime text
                        writeWords = writeWords+\
                                     str(datetime.datetime.fromtimestamp(float(fdataframe[rowindex][cindex])/1000.0))+","
                    else:
                        # enum/string/numeric cells are written verbatim
                        writeWords=writeWords+fdataframe[rowindex][cindex]+","
                else:
                    writeWords = writeWords + ","  # NA cell
            # append the uuid columns; the final one terminates the line
            for cindex in range(len(uuidVecs)-1):
                writeWords=writeWords+str(uuidVecs[cindex][rowindex-1])+","
            writeWords=writeWords+str(uuidVecs[-1][rowindex-1])+'\n'
            fwriteFile.write(writeWords)
def checkCorrectSkips(originalFullFrame, csvfile, skipped_columns):
    """
    Re-import *csvfile* with *skipped_columns* via both upload_file and
    import_file, check the two results agree, and verify each kept column
    equals the corresponding column of *originalFullFrame*.

    :param originalFullFrame: frame parsed from csvfile with no columns skipped
    :param csvfile: path of the csv file
    :param skipped_columns: list of column indices to skip
    :return: None (the comparison helpers raise on mismatch)
    """
    skippedFrameUF = h2o.upload_file(csvfile, skipped_columns=skipped_columns)
    skippedFrameIF = h2o.import_file(csvfile, skipped_columns=skipped_columns) # this two frames should be the same
    compare_frames_local(skippedFrameUF, skippedFrameIF, prob=0.5)
    # skipCounter tracks the kept column's index inside the skipped frame.
    skipCounter = 0
    typeDict = originalFullFrame.types
    frameNames = originalFullFrame.names
    for cindex in range(len(frameNames)):
        if cindex not in skipped_columns:
            print("Checking column {0}...".format(cindex))
            # Dispatch on the original column type.
            if typeDict[frameNames[cindex]] == u'enum':
                compare_frames_local_onecolumn_NA_enum(originalFullFrame[cindex],
                                                       skippedFrameIF[skipCounter], prob=1, tol=1e-10,
                                                       returnResult=False)
            elif typeDict[frameNames[cindex]] == u'string':
                compare_frames_local_onecolumn_NA_string(originalFullFrame[cindex],
                                                         skippedFrameIF[skipCounter], prob=1,
                                                         returnResult=False)
            else:
                compare_frames_local_onecolumn_NA(originalFullFrame[cindex], skippedFrameIF[skipCounter],
                                                  prob=1, tol=1e-10, returnResult=False)
            skipCounter = skipCounter + 1
def checkCorrectSkipsFolder(originalFullFrame, csvfile, skipped_columns):
    """
    Folder variant of checkCorrectSkips: re-import *csvfile* (a folder path)
    with *skipped_columns* via import_file and verify each kept column equals
    the corresponding column of *originalFullFrame*.  Unlike checkCorrectSkips,
    there is no upload_file cross-check here (upload_file does not take a folder).

    :param originalFullFrame: frame parsed with no columns skipped
    :param csvfile: path of the csv folder/file to re-import
    :param skipped_columns: list of column indices to skip
    :return: None (the comparison helpers raise on mismatch)
    """
    skippedFrameIF = h2o.import_file(csvfile, skipped_columns=skipped_columns) # this two frames should be the same
    # skipCounter tracks the kept column's index inside the skipped frame.
    skipCounter = 0
    typeDict = originalFullFrame.types
    frameNames = originalFullFrame.names
    for cindex in range(len(frameNames)):
        if cindex not in skipped_columns:
            print("Checking column {0}...".format(cindex))
            # Dispatch on the original column type.
            if typeDict[frameNames[cindex]] == u'enum':
                compare_frames_local_onecolumn_NA_enum(originalFullFrame[cindex],
                                                       skippedFrameIF[skipCounter], prob=1, tol=1e-10,
                                                       returnResult=False)
            elif typeDict[frameNames[cindex]] == u'string':
                compare_frames_local_onecolumn_NA_string(originalFullFrame[cindex],
                                                         skippedFrameIF[skipCounter], prob=1,
                                                         returnResult=False)
            else:
                compare_frames_local_onecolumn_NA(originalFullFrame[cindex], skippedFrameIF[skipCounter],
                                                  prob=1, tol=1e-10, returnResult=False)
            skipCounter = skipCounter + 1
def assertModelColNamesTypesCorrect(modelNames, modelTypes, frameNames, frameTypesDict):
    """
    Assert that a model's reported column names and types agree with the
    training frame's.  H2O reports 'numeric' for both 'real' and 'int' frame
    columns; all other types must match exactly (case-insensitive).

    :param modelNames: column names reported by the model
    :param modelTypes: column types reported by the model (parallel to modelNames)
    :param frameNames: column names of the training frame
    :param frameTypesDict: dict mapping frame column name -> frame column type
    :raises AssertionError: on any name-set or type mismatch
    """
    # BUG FIX: the original compared fName.sort() == mName.sort(), i.e.
    # None == None, which is always True — the name check never fired.
    assert sorted(frameNames) == sorted(modelNames), \
        "Expected column names {0}, actual column names {1} and they" \
        " are different".format(frameNames, modelNames)
    for ind in range(len(frameNames)):
        modelType = modelTypes[modelNames.index(frameNames[ind])].lower()
        frameType = frameTypesDict[frameNames[ind]].lower()
        if modelType == "numeric":
            # 'numeric' covers both real and int frame columns.
            assert frameType == 'real' or frameType == 'int', \
                "Expected training data types for column {0} is {1}. Actual training data types for column {2} from " \
                "model output is {3}".format(frameNames[ind], frameTypesDict[frameNames[ind]],
                                             frameNames[ind], modelTypes[modelNames.index(frameNames[ind])])
        else:
            assert modelType == frameType, \
                "Expected training data types for column {0} is {1}. Actual training data types for column {2} from " \
                "model output is {3}".format(frameNames[ind], frameTypesDict[frameNames[ind]],
                                             frameNames[ind], modelTypes[modelNames.index(frameNames[ind])])
def saveModelMojo(model):
    '''
    Given a H2O model, this function will save it in a directory off the results directory. In addition, it will
    return the absolute path of where the mojo file is.

    :param model: trained H2O model to export
    :return: absolute path of the directory containing the downloaded mojo
    '''
    # save model
    # Sanitize the model id the same way getMojoName does.
    regex = re.compile("[+\\-* !@#$%^&()={}\\[\\]|;:'\"<>,.?/]")
    MOJONAME = regex.sub("_", model._id)
    print("Downloading Java prediction model code from H2O")
    # NOTE(review): realpath('__file__') resolves the literal string
    # '__file__' relative to the current working directory (not this module's
    # location) — presumably intentional for how the tests are launched;
    # confirm before changing to the __file__ variable.
    tmpdir = os.path.normpath(os.path.join(os.path.dirname(os.path.realpath('__file__')), "..", "results", MOJONAME))
    os.makedirs(tmpdir)  # raises OSError if the directory already exists
    model.download_mojo(path=tmpdir)  # save mojo
    return tmpdir
# This file will contain functions used by GLM test only.
def assertEqualRegPaths(keys, pathList, index, onePath, tol=1e-6):
    """For each key, assert pathList[key][index] equals onePath[key][0] within
    tol, skipping keys whose pathList entry is None."""
    for key in keys:
        expected_path = pathList[key]
        if expected_path is None:
            continue
        actual = onePath[key][0]
        assert abs(expected_path[index] - actual) < tol, \
            "Expected value: {0}, Actual: {1}".format(expected_path[index], actual)
def assertEqualCoeffDicts(coef1Dict, coef2Dict, tol=1e-6):
    """
    Assert two coefficient dicts have the same size and per-key values that
    agree within *tol*.  NaN must match NaN, and an infinity must match an
    infinity.

    :param coef1Dict: first {coefficient name: value} dict (its keys drive the comparison)
    :param coef2Dict: second {coefficient name: value} dict
    :param tol: absolute tolerance for finite values
    :raises AssertionError: on any mismatch
    """
    # BUG FIX: the original failure message called len(coef1Dict, len(coef2Dict)),
    # which raised TypeError instead of showing the intended message.
    assert len(coef1Dict) == len(coef2Dict), \
        "Length of first coefficient dict: {0}, length of second coefficient " \
        "dict: {1} and they are different.".format(len(coef1Dict), len(coef2Dict))
    for key in coef1Dict:
        val1 = coef1Dict[key]
        val2 = coef2Dict[key]
        if math.isnan(val1):
            assert math.isnan(val2), "Coefficient for {0} from first dict: {1}, from second dict: {2} are different." \
                                     "".format(key, coef1Dict[key], coef2Dict[key])
        elif math.isinf(val1):
            # NOTE(review): this accepts an infinity of either sign in val2,
            # matching the original behavior.
            assert math.isinf(val2), "Coefficient for {0} from first dict: {1}, from second dict: {2} are different." \
                                     "".format(key, coef1Dict[key], coef2Dict[key])
        else:
            assert abs(val1 - val2) < tol, "Coefficient for {0} from first dict: {1}, from second" \
                                           " dict: {2} and they are different.".format(key, coef1Dict[key],
                                                                                       coef2Dict[key])
def assertEqualModelMetrics(metrics1, metrics2, tol = 1e-6,
                            keySet=["MSE", "AUC", "Gini", "null_deviance", "logloss", "RMSE",
                                    "pr_auc", "r2"]):
    """
    Assert two model-metric objects have the same type and that every floating
    point metric listed in *keySet* agrees within a relative tolerance.

    :param metrics1: first metrics object (must expose _metric_json)
    :param metrics2: second metrics object (must expose _metric_json)
    :param tol: relative tolerance (normalizer floored at 1)
    :param keySet: metric names to compare (non-float or absent keys are skipped)
    :raises AssertionError: on a type or metric mismatch
    """
    # 1. Check model types.  BUG FIX: use == for string comparison — the
    # original used `is`, which only worked by accident of string interning.
    model1_type = metrics1.__class__.__name__
    model2_type = metrics2.__class__.__name__
    assert model1_type == model2_type, "The model types differ. The first model metric is of type {0} and the second " \
                                       "model metric is of type {1}.".format(model1_type, model2_type)
    metricDict1 = metrics1._metric_json
    metricDict2 = metrics2._metric_json
    for key in keySet:
        if key in metricDict1.keys() and (isinstance(metricDict1[key], float)): # only compare floating point metrics
            assert abs(metricDict1[key]-metricDict2[key])/max(1, max(metricDict1[key], metricDict2[key])) < tol, \
                "ModelMetric {0} from model 1, {1} from model 2 are different.".format(metricDict1[key], metricDict2[key])
# When an array of alphas and/or lambdas is given, a list of submodels is also built. For each submodel built, only
# the coefficients and lambda/alpha/deviance values are returned. The model metrics are calculated from the submodel
# with the best deviance.
#
# In this test, in addition, we build separate models using just one lambda and one alpha value, as when building one
# submodel. In theory, the coefficients obtained from the separate models should be equal to those of the submodels.
# We check and compare the following:
# 1. coefficients from the submodels and the individual models should match when they use the same alpha/lambda values;
# 2. training metrics of the array-trained model should equal those of the individual model matching its alpha/lambda values.
def compareSubmodelsNindividualModels(modelWithArray, trainingData, xarray, yindex):
    """
    Verify each submodel of a GLM trained with alpha/lambda arrays against a
    GLM trained individually with the same alpha/lambda pair: coefficients and
    regularization-path entries must match, the best submodel must supply the
    final model's training metrics and coefficients, and every non-best
    individual model must not beat the best submodel's residual deviance.

    :param modelWithArray: binomial GLM trained with arrays of alpha/lambda
    :param trainingData: training frame
    :param xarray: predictor columns
    :param yindex: response column
    :return: None (the assertion helpers raise on mismatch)
    """
    best_submodel_index = modelWithArray._model_json["output"]["best_submodel_index"]
    r = H2OGeneralizedLinearEstimator.getGLMRegularizationPath(modelWithArray) # contains all lambda/alpha values of submodels trained.
    submodel_num = len(r["lambdas"])
    regKeys = ["alphas", "lambdas", "explained_deviance_valid", "explained_deviance_train"]
    for submodIndx in range(submodel_num): # manually build glm model and compare to those built before
        modelGLM = H2OGeneralizedLinearEstimator(family='binomial', alpha=[r["alphas"][submodIndx]], Lambda=[r["lambdas"][submodIndx]])
        modelGLM.train(training_frame=trainingData, x=xarray, y=yindex)
        # check coefficients between submodels and model trained with same parameters
        assertEqualCoeffDicts(r["coefficients"][submodIndx], modelGLM.coef())
        modelGLMr = H2OGeneralizedLinearEstimator.getGLMRegularizationPath(modelGLM) # contains one item only
        assertEqualRegPaths(regKeys, r, submodIndx, modelGLMr)
        if (best_submodel_index == submodIndx): # check training metrics of modelGLM should equal that of m since it is the best subModel
            assertEqualModelMetrics(modelWithArray._model_json["output"]["training_metrics"],
                                    modelGLM._model_json["output"]["training_metrics"])
            assertEqualCoeffDicts(modelWithArray.coef(), modelGLM.coef()) # model coefficient should come from best submodel
        else: # check and make sure best_submodel_index has lowest deviance
            assert modelGLM.residual_deviance() - modelWithArray.residual_deviance() >= 0, \
                "Individual model has better residual_deviance than best submodel!"
def extractNextCoeff(cs_norm, orderedCoeffNames, startVal):
    """Overwrite *startVal* in place with the cs_norm values taken in
    orderedCoeffNames order, and return it."""
    for pos in range(len(startVal)):
        coeff_name = orderedCoeffNames[pos]
        startVal[pos] = cs_norm[coeff_name]
    return startVal
def assertEqualScoringHistoryIteration(model_long, model_short, col_list_compare, tolerance=1e-6):
    """
    Compare the scoring histories of two models at the iteration numbers they
    have in common: model_short's history (excluding its last entry) is walked
    against model_long's, and every column in *col_list_compare* must agree
    within *tolerance* wherever the iteration numbers coincide.

    :param model_long: model with the longer scoring history
    :param model_short: model with the shorter scoring history
    :param col_list_compare: names of the scoring-history columns to compare
    :param tolerance: maximum allowed absolute per-value difference
    :return: None (raises AssertionError on mismatch)
    """
    scoring_history_long = model_long._model_json["output"]["scoring_history"]
    scoring_history_short = model_short._model_json["output"]["scoring_history"]
    cv_4th_len = len(scoring_history_short.cell_values) - 1 # ignore last iteration, scoring is performed at different spots
    cv_len = len(scoring_history_long.cell_values)
    col_2D = scoring_history_short.col_header
    iterInd = col_2D.index('iterations')
    count = 0
    for index in range(cv_4th_len):
        iterInd4th = scoring_history_short.cell_values[index][iterInd]
        iterIndlong = scoring_history_long.cell_values[count][iterInd]
        # Advance the long history until its iteration catches up with the
        # short history's current iteration.
        while not(iterInd4th == None) and (iterInd4th > iterIndlong):
            count = count+1
            if count >= cv_len:
                break
            iterIndlong = scoring_history_long.cell_values[count][iterInd]
        if not(iterInd4th == None) and not(iterInd4th == '') and (iterInd4th == iterIndlong):
            # Iterations coincide: compare every requested column here.
            for col_header in col_list_compare:
                ind = col_2D.index(col_header)
                val_short = scoring_history_short.cell_values[index][ind]
                val_long = scoring_history_long.cell_values[count][ind]
                # Skip missing ('') or NaN entries on either side.
                if not(val_short == '' or math.isnan(val_short) or val_long == '' or math.isnan(val_long)):
                    assert abs(scoring_history_short.cell_values[index][ind]-
                               scoring_history_long.cell_values[count][ind]) < tolerance, \
                        "{0} expected: {1}, actual: {2}".format(col_header, scoring_history_short.cell_values[index][ind],
                                                                scoring_history_long.cell_values[count][ind])
            count = count+1
def assertCoefEqual(regCoeff, coeff, coeffClassSet, tol=1e-6):
    """Assert flattened multinomial coefficients (keys like "name_<classIndex>")
    match the corresponding per-class coefficient dicts within tol."""
    for flat_key in regCoeff:
        name_and_class = flat_key.split('_')
        class_index = int(name_and_class[1])
        val1 = regCoeff[flat_key]
        val2 = coeff[coeffClassSet[class_index]][name_and_class[0]]
        assert type(val1) == type(val2), "type of coeff1: {0}, type of coeff2: {1}".format(type(val1), type(val2))
        diff = abs(val1 - val2)
        print("val1: {0}, val2: {1}, tol: {2}".format(val1, val2, tol))
        assert diff < tol, "diff {0} exceeds tolerance {1}.".format(diff, tol)
def assertCoefDictEqual(regCoeff, coeff, tol=1e-6):
    """Assert every coefficient in regCoeff appears in coeff with the same type
    and a value within tol."""
    for name, val1 in regCoeff.items():
        val2 = coeff[name]
        assert type(val1) == type(val2), "type of coeff1: {0}, type of coeff2: {1}".format(type(val1), type(val2))
        diff = abs(val1 - val2)
        assert diff < tol, "diff {0} exceeds tolerance {1}.".format(diff, tol)
def assert_equals(expected, actual, message=""):
    """Assert that *actual* equals *expected*; *message* prefixes the failure text."""
    assert expected == actual, (
        "{0}\nexpected:{1}\nactual\t:{2}".format(message, expected, actual)
    )
|
zxjzxj9/FlaskBoard | refs/heads/master | web/lib/python2.7/site-packages/setuptools/command/__init__.py | 101 | __all__ = [
'alias', 'bdist_egg', 'bdist_rpm', 'build_ext', 'build_py', 'develop',
'easy_install', 'egg_info', 'install', 'install_lib', 'rotate', 'saveopts',
'sdist', 'setopt', 'test', 'install_egg_info', 'install_scripts',
'register', 'bdist_wininst', 'upload_docs', 'upload',
]
# Register the "egg" distribution format with distutils' bdist command, so
# that "setup.py bdist --formats=egg" dispatches to the bdist_egg command.
from distutils.command.bdist import bdist
import sys
from setuptools.command import install_scripts
# Guard against double registration: bdist's format tables are shared,
# module-level state.
if 'egg' not in bdist.format_commands:
    # format_command maps a format name -> (command name, description).
    bdist.format_command['egg'] = ('bdist_egg', "Python .egg file")
    bdist.format_commands.append('egg')
# Remove the helper names from the package namespace; they are not part of
# the public API declared in __all__ above.
del bdist, sys
|
City-of-Helsinki/kerrokantasi | refs/heads/master | democracy/__init__.py | 3 | default_app_config = 'democracy.apps.DemocracyAppConfig'
|
def WebIDLTest(parser, harness):
    """Every WebIDL snippet below misuses [PutForwards] and must fail to parse."""
    snippets = [
        # [PutForwards] target attribute must be of interface type.
        """
            interface I {
              [PutForwards=B] readonly attribute long A;
            };
        """,
        # Forwarded-to attribute B does not exist on interface J.
        """
            interface I {
              [PutForwards=B] readonly attribute J A;
            };
            interface J {
            };
        """,
        # [PutForwards] is only valid on readonly attributes.
        """
            interface I {
              [PutForwards=B] attribute J A;
            };
            interface J {
              attribute long B;
            };
        """,
        # [PutForwards] is not allowed on static attributes.
        """
            interface I {
              [PutForwards=B] static readonly attribute J A;
            };
            interface J {
              attribute long B;
            };
        """,
        # [PutForwards] is not allowed on callback interfaces.
        """
            callback interface I {
              [PutForwards=B] readonly attribute J A;
            };
            interface J {
              attribute long B;
            };
        """,
        # Cyclic [PutForwards] chains must be rejected.
        """
            interface I {
              [PutForwards=C] readonly attribute J A;
              [PutForwards=C] readonly attribute J B;
            };
            interface J {
              [PutForwards=D] readonly attribute K C;
            };
            interface K {
              [PutForwards=A] readonly attribute I D;
            };
        """,
    ]
    for index, idl in enumerate(snippets):
        # Each case gets a fresh parser; the first uses the one passed in.
        if index:
            parser = parser.reset()
        threw = False
        try:
            parser.parse(idl)
            results = parser.finish()
        except:
            threw = True
        harness.ok(threw, "Should have thrown.")
|
niknow/scipy | refs/heads/master | scipy/io/matlab/tests/test_byteordercodes.py | 126 | ''' Tests for byteorder module '''
from __future__ import division, print_function, absolute_import
import sys
from numpy.testing import assert_raises, assert_, run_module_suite
import scipy.io.matlab.byteordercodes as sibc
def test_native():
    """byteordercodes' native-endianness flag must agree with sys.byteorder."""
    assert_(sibc.sys_is_le == (sys.byteorder == 'little'))
def test_to_numpy():
    """to_numpy_code must map every accepted alias onto '<' or '>'."""
    # 'native'/'swapped' resolve relative to the running platform.
    native, swapped = ('<', '>') if sys.byteorder == 'little' else ('>', '<')
    assert_(sibc.to_numpy_code('native') == native)
    assert_(sibc.to_numpy_code('swapped') == swapped)
    assert_(sibc.to_numpy_code('native') == sibc.to_numpy_code('='))
    assert_(sibc.to_numpy_code('big') == '>')
    for alias in ('little', '<', 'l', 'L', 'le'):
        assert_(sibc.to_numpy_code(alias) == '<')
    for alias in ('big', '>', 'b', 'B', 'be'):
        assert_(sibc.to_numpy_code(alias) == '>')
    # Anything outside the alias tables is rejected.
    assert_raises(ValueError, sibc.to_numpy_code, 'silly string')
if __name__ == "__main__":
    # NOTE(review): numpy.testing.run_module_suite was removed in recent numpy
    # releases -- confirm the pinned numpy version still provides it.
    run_module_suite()
|
patrabu/dl120th | refs/heads/master | dat2db.py | 1 | #! /usr/bin/python
# -*- coding: utf-8 -*-
#
# dat2db - Put content of dat file from the Voltcraft DL-120TH into DB.
#
# Copyright 2012, 2015 Patrick Rabu
import argparse
import sys
import sqlite3
if __name__ == '__main__':
    # Build the command line parser.  The old ArgumentParser(version=...)
    # keyword was removed from argparse (it raises TypeError on Python 3);
    # the supported spelling is a --version argument with action='version'.
    parser = argparse.ArgumentParser(
        description='Insert content of dat file into sqlite DB.',
        prog='dat2db.py')
    parser.add_argument(
        '--version', action='version', version='%(prog)s 0.1')
    parser.add_argument(
        '-f', '--filename', help='Name of the data file.')
    parser.add_argument(
        '-d', '--database', help='Name of the database file.')
    args = parser.parse_args()
    print("Filename=", args.filename)
    print("Database=", args.database)
    # Both options are mandatory; report every missing one before exiting.
    commandOk = True
    if args.filename is None:
        print("Filename is mandatory.")
        commandOk = False
    else:
        filename = args.filename
    if args.database is None:
        print("DataBase name is mandatory.")
        commandOk = False
    else:
        dbname = args.database
    if not commandOk:
        print("Command line error...")
        sys.exit(2)
    # Open (or create) the database and make sure the target table exists.
    conn = sqlite3.connect(dbname)
    c = conn.cursor()
    c.execute(
        '''CREATE TABLE IF NOT EXISTS
        sensors (logger text, dt text, temp real, hygro real)''')
    with open(filename) as datfile:
        # The first line is a header; its second field is the logger name.
        line = datfile.readline()
        print(line)
        words = line.split()
        print(words[0], "-", words[1], "-", words[2],
              "-", words[3], "-", words[4])
        logger_name = words[1]
        # Data lines: timestamp (col 0), temperature (col 3), humidity (col 4).
        for line in datfile:
            words = line.split()
            print(words[0], "-", words[3], "-", words[4])
            c.execute(
                'INSERT INTO sensors VALUES (?, ?, ?, ?)',
                (logger_name, words[0], words[3], words[4]))
    conn.commit()
    conn.close()
    sys.exit(0)
|
xinjiguaike/edx-platform | refs/heads/master | openedx/core/djangoapps/credit/signature.py | 69 | """
Calculate digital signatures for messages sent to/from credit providers,
using a shared secret key.
The signature is calculated as follows:
1) Encode all parameters of the request (except the signature) in a string.
2) Encode each key/value pair as a string of the form "{key}:{value}".
3) Concatenate key/value pairs in ascending alphabetical order by key.
4) Calculate the HMAC-SHA256 digest of the encoded request parameters, using a 32-character shared secret key.
5) Encode the digest in hexadecimal.
It is the responsibility of the credit provider to check the signature of messages
we send them, and it is our responsibility to check the signature of messages
we receive from the credit provider.
"""
import logging
import hashlib
import hmac
from django.conf import settings
log = logging.getLogger(__name__)
def get_shared_secret_key(provider_id):
    """
    Retrieve the shared secret key for a particular credit provider.

    Looks the provider up in settings.CREDIT_PROVIDER_SECRET_KEYS and returns
    the key as a byte string (Python 2 ``str``), or None when the provider is
    unknown or its key contains non-ASCII characters.
    """
    secret = getattr(settings, "CREDIT_PROVIDER_SECRET_KEYS", {}).get(provider_id)
    # Coerce unicode keys down to a plain byte string (this module is
    # Python 2 code -- ``unicode`` is the py2 builtin).  Keys with non-ASCII
    # characters cannot be converted, so they are rejected with a log entry.
    if isinstance(secret, unicode):
        try:
            secret = str(secret)
        except UnicodeEncodeError:
            secret = None
            log.error(u'Shared secret key for credit provider "%s" contains non-ASCII unicode.', provider_id)
    return secret
def signature(params, shared_secret):
    """
    Calculate the digital signature for parameters using a shared secret.

    Arguments:
        params (dict): Parameters to sign.  Ignores the "signature" key if present.
        shared_secret (str): The shared secret string.

    Returns:
        str: The signature, a 64-character hex-encoded HMAC-SHA256 digest.
    """
    # Encode each pair as "{key}:{value}" and concatenate the pairs in
    # ascending alphabetical order by key (the protocol described in the
    # module docstring); the "signature" key itself is excluded.
    encoded_params = "".join([
        "{key}:{value}".format(key=key, value=params[key])
        for key in sorted(params.keys())
        if key != "signature"
    ])
    # hmac.new() requires byte strings: mandatory on Python 3, and on
    # Python 2 this also avoids a failure when params contain non-ASCII
    # unicode values.  Byte strings (py2 str) pass through unchanged.
    if not isinstance(encoded_params, bytes):
        encoded_params = encoded_params.encode('utf-8')
    if not isinstance(shared_secret, bytes):
        shared_secret = shared_secret.encode('utf-8')
    hasher = hmac.new(shared_secret, encoded_params, hashlib.sha256)
    return hasher.hexdigest()
|
jackm/Kijiji-Repost-Headless | refs/heads/master | kijiji_repost_headless/get_ids.py | 1 | import json
import requests
def find_where(d, area_id=None):
"""
:param d: dictionary available here: http://www.kijiji.ca/j-locations.json
:param area_id: string, parent's region's ID
:return: tuple, containing the location id and area id of the selected_dict region
"""
list_of_dicts = sorted(d['children'], key=lambda k: k['nameEn'])
print() # empty space
# print a numbered list (starting with number 1) of all the regions
for num, dictionary in enumerate(list_of_dicts, 1):
print(num, "-", dictionary['nameEn'])
# make sure the input is a number and a valid index
while True:
index = input('\nWhere are you? ')
if index.isdigit():
if 0 < int(index) <= len(list_of_dicts):
selected_dict = list_of_dicts[int(index) - 1]
break
print("Enter a valid number!")
# if the selected dictionary has children, we list it again
if len(selected_dict['children']) > 0:
return find_where(selected_dict, selected_dict['id'])
# else we return the location
print("Here's your location ID:", selected_dict['id'])
print("And your location area:", area_id)
return selected_dict['id'], area_id
def get_location_and_area_ids():
locations_url = 'http://www.kijiji.ca/j-locations.json'
locations_page = requests.get(locations_url)
# get rid of javascript variable name and trailing semi-colon
locations_data = locations_page.text.split(" = ")[1].strip()[:-1]
locations_dict = json.loads(locations_data)
return find_where(locations_dict)
if __name__ == "__main__":
get_location_and_area_ids()
|
formiano/enigma2-4.4 | refs/heads/master | lib/python/Plugins/SystemPlugins/DeviceManager/HddMount.py | 16 | # for localized messages
from . import _
from enigma import *
from Plugins.Plugin import PluginDescriptor
from Screens.Screen import Screen
from Screens.VirtualKeyBoard import VirtualKeyBoard
from Components.ActionMap import ActionMap
from Components.MenuList import MenuList
from Components.GUIComponent import GUIComponent
from Components.HTMLComponent import HTMLComponent
from Tools.Directories import fileExists, crawlDirectory, resolveFilename, SCOPE_CURRENT_PLUGIN
from Tools.LoadPixmap import LoadPixmap
from Components.MultiContent import MultiContentEntryText, MultiContentEntryPixmapAlphaTest
from Components.Button import Button
from Components.Label import Label
from Components.Sources.List import List
from Screens.MessageBox import MessageBox
from Screens.Standby import TryQuitMainloop
from MountPoints import MountPoints
from Disks import Disks
from ExtraMessageBox import ExtraMessageBox
from boxbranding import getMachineBrand, getMachineName
import os
import sys
import re
class HddMount(Screen):
	"""Ask the user where to mount a disk partition.

	Red creates a persistent ("fixed") mount written to the mount
	configuration; green creates a one-shot ("fast") mount that is not saved.
	"""
	skin = """
	<screen name="HddMount" position="center,center" size="560,430" title="Hard Drive Mount">
		<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
		<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
		<ePixmap pixmap="skin_default/buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
		<ePixmap pixmap="skin_default/buttons/blue.png" position="420,0" size="140,40" alphatest="on" />
		<widget name="key_red" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
		<widget name="key_green" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
		<widget name="key_yellow" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
		<widget name="key_blue" position="420,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
		<widget name="menu" position="20,45" scrollbarMode="showOnDemand" size="520,380" transparent="1" />
	</screen>"""

	# Menu row index -> mount path.  The row after the last entry here is
	# "Mount on custom path", handled via the virtual keyboard.
	MOUNT_PATHS = [
		"/media/hdd", "/media/hdd1", "/media/hdd2", "/media/hdd3",
		"/media/hdd4", "/media/hdd5", "/media/usb", "/media/usb1",
		"/media/usb2", "/media/usb3", "/media/usb4", "/media/usb5",
	]

	def __init__(self, session, device, partition):
		Screen.__init__(self, session)
		self.device = device
		self.partition = partition
		self.mountpoints = MountPoints()
		self.mountpoints.read()
		# True when the user picked a "fast" (one-shot, unsaved) mount.
		self.fast = False
		self.list = []
		self.list.append(_("Mount as main hdd"))
		self.list.append(_("Mount as /media/hdd1"))
		self.list.append(_("Mount as /media/hdd2"))
		self.list.append(_("Mount as /media/hdd3"))
		self.list.append(_("Mount as /media/hdd4"))
		self.list.append(_("Mount as /media/hdd5"))
		self.list.append(_("Mount as /media/usb"))
		self.list.append(_("Mount as /media/usb1"))
		self.list.append(_("Mount as /media/usb2"))
		self.list.append(_("Mount as /media/usb3"))
		self.list.append(_("Mount as /media/usb4"))
		self.list.append(_("Mount as /media/usb5"))
		self.list.append(_("Mount on custom path"))
		self["menu"] = MenuList(self.list)
		self["key_red"] = Button(_("Fixed mount"))
		self["key_green"] = Button(_("Fast mount"))
		self["key_blue"] = Button(_("Exit"))
		self["key_yellow"] = Button("")
		self["actions"] = ActionMap(["OkCancelActions", "ColorActions"],
		{
			"blue": self.quit,
			"green": self.green,
			"red": self.ok,
			"cancel": self.quit,
		}, -2)
		self.onShown.append(self.setWindowTitle)

	def setWindowTitle(self):
		self.setTitle(_("Mountpoints"))

	def ok(self):
		# Red button: persistent ("fixed") mount.
		self.fast = False
		self._applySelection()

	def green(self):
		# Green button: one-shot ("fast") mount, not written to the config.
		self.fast = True
		self._applySelection()

	def _applySelection(self):
		# Shared dispatch for ok()/green() (previously duplicated verbatim):
		# map the selected row onto a mount path, or open the keyboard for a
		# custom path when the last row is selected.
		selected = self["menu"].getSelectedIndex()
		if 0 <= selected < len(self.MOUNT_PATHS):
			self.setMountPoint(self.MOUNT_PATHS[selected])
		elif selected == len(self.MOUNT_PATHS):
			self.session.openWithCallback(self.customPath, VirtualKeyBoard, title = (_("Insert mount point:")), text = _("/media/custom"))

	def customPath(self, result):
		# Callback from the virtual keyboard with the user-entered path.
		if result and len(result) > 0:
			result = result.rstrip("/")
			os.system("mkdir -p %s" % result)
			self.setMountPoint(result)

	def setMountPoint(self, path):
		self.cpath = path
		if self.mountpoints.exist(path):
			# The path is already assigned to another drive: ask first.
			self.session.openWithCallback(self.setMountPointCb, ExtraMessageBox, _("Selected mount point is already used by another drive."), _("Mount point exist!"),
				[ [ _("Change old drive with this new drive"), "ok.png" ],
				[ _("Keep old drive"), "cancel.png" ],
				])
		else:
			self.setMountPointCb(0)

	def setMountPointCb(self, result):
		# result == 0 means "go ahead" (either no conflict, or the user chose
		# to replace the old drive).
		if result == 0:
			if self.mountpoints.isMounted(self.cpath):
				if not self.mountpoints.umount(self.cpath):
					self.session.open(MessageBox, _("Cannot umount current drive.\nA record in progress, timeshift or some external tools (like samba, swapfile and nfsd) may cause this problem.\nPlease stop this actions/applications and try again"), MessageBox.TYPE_ERROR)
					self.close()
					return
			self.mountpoints.delete(self.cpath)
			if not self.fast:
				# Only fixed mounts are persisted to the mount configuration.
				self.mountpoints.add(self.device, self.partition, self.cpath)
			self.mountpoints.write()
			if not self.mountpoints.mount(self.device, self.partition, self.cpath):
				self.session.open(MessageBox, _("Cannot mount new drive.\nPlease check filesystem or format it and try again"), MessageBox.TYPE_ERROR)
			elif self.cpath == "/media/hdd":
				os.system("/bin/mkdir -p /media/hdd/movie")
			if not self.fast:
				# A fixed mount change only takes effect after a restart.
				message = _("Device Fixed Mount Point change needs a system restart in order to take effect.\nRestart your %s %s now?") % (getMachineBrand(), getMachineName())
				mbox = self.session.openWithCallback(self.restartBox, MessageBox, message, MessageBox.TYPE_YESNO)
				mbox.setTitle(_("Restart %s %s") % (getMachineBrand(), getMachineName()))
			else:
				self.close()

	def restartBox(self, answer):
		if answer is True:
			self.session.open(TryQuitMainloop, 2)
		else:
			self.close()

	def quit(self):
		self.close()
def MountEntry(description, details):
	"""Build a (pixmap, description, details) row for the mounted-drives list."""
	icon_path = resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/DeviceManager/icons/diskusb.png")
	icon = LoadPixmap(cached = True, path = icon_path)
	return (icon, description, details)
class HddFastRemove(Screen):
	"""List all mounted partitions and let the user unmount one for removal."""
	skin = """
	<screen name="HddFastRemove" position="center,center" size="560,430" title="Hard Drive Fast Umount">
		<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
		<ePixmap pixmap="skin_default/buttons/blue.png" position="140,0" size="140,40" alphatest="on" />
		<widget name="key_red" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
		<widget name="key_blue" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
		<widget source="menu" render="Listbox" position="10,55" size="520,380" scrollbarMode="showOnDemand">
			<convert type="TemplatedMultiContent">
				{"template": [
					MultiContentEntryPixmapAlphaTest(pos = (5, 0), size = (48, 48), png = 0),
					MultiContentEntryText(pos = (65, 3), size = (190, 38), font=0, flags = RT_HALIGN_LEFT|RT_VALIGN_TOP, text = 1),
					MultiContentEntryText(pos = (165, 27), size = (290, 38), font=1, flags = RT_HALIGN_LEFT|RT_VALIGN_TOP, text = 2),
					],
					"fonts": [gFont("Regular", 22), gFont("Regular", 18)],
					"itemHeight": 50
				}
			</convert>
		</widget>
	</screen>"""

	def __init__(self, session):
		Screen.__init__(self, session)
		# Populate self.disks / self.mounts before the list widget is built.
		self.refreshMP(False)
		self["menu"] = List(self.disks)
		self["key_red"] = Button(_("Unmount"))
		self["key_blue"] = Button(_("Exit"))
		self["actions"] = ActionMap(["OkCancelActions", "ColorActions"],
		{
			"blue": self.quit,
			"red": self.red,
			"cancel": self.quit,
		}, -2)
		self.onShown.append(self.setWindowTitle)

	def setWindowTitle(self):
		self.setTitle(_("Fast Mounted Remove"))

	def red(self):
		# Unmount the highlighted entry and refresh the list.
		if len(self.mounts) > 0:
			self.sindex = self["menu"].getIndex()
			self.mountpoints.umount(self.mounts[self.sindex]) # actually umount device here - also check both cases possible - for instance error case also check with stay in /e.g. /media/usb folder on telnet
			self.session.open(MessageBox, _("Fast mounted Media unmounted.\nYou can safely remove the Device now, if no further Partitions (displayed as P.x on Devicelist - where x >=2) are mounted on the same Device.\nPlease unmount Fixed Mounted Devices with Device Manager Panel!"), MessageBox.TYPE_INFO)
			self.refreshMP(True)

	def refreshMP(self, uirefresh = True):
		# Rebuild the parallel lists self.disks (display rows) and
		# self.mounts (the mount path behind each row).
		self.mdisks = Disks()
		self.mountpoints = MountPoints()
		self.mountpoints.read()
		self.disks = []
		self.mounts = []
		for disk in self.mdisks.disks:
			if disk[2] == True:
				diskname = disk[3]
				for partition in disk[5]:
					mp = ""
					rmp = ""
					try:
						mp = self.mountpoints.get(partition[0][:3], int(partition[0][3:]))
						rmp = self.mountpoints.getRealMount(partition[0][:3], int(partition[0][3:]))
					except Exception:
						# Was "except Exception, e" (Python-2-only syntax with
						# an unused binding); partitions whose name cannot be
						# parsed are deliberately skipped.
						pass
					if len(mp) > 0:
						self.disks.append(MountEntry(disk[3], "P.%s (Fixed: %s)" % (partition[0][3:], mp)))
						self.mounts.append(mp)
					elif len(rmp) > 0:
						self.disks.append(MountEntry(disk[3], "P.%s (Fast: %s)" % (partition[0][3:], rmp)))
						self.mounts.append(rmp)
		if uirefresh:
			self["menu"].setList(self.disks)

	def quit(self):
		self.close()
|
jessedhillon/zulip | refs/heads/master | zerver/test_hooks.py | 103 | # -*- coding: utf-8 -*-
from zerver.lib.test_helpers import AuthedTestCase
from zerver.lib.test_runner import slow
from zerver.models import Message
import ujson
class JiraHookTests(AuthedTestCase):
    """End-to-end tests for the JIRA webhook: each test posts a canned JIRA
    payload fixture and checks the Zulip message it produces.

    Note: the previous revision contained ``test_workflow_postfuncion``, a
    misspelled byte-for-byte duplicate of ``test_workflow_postfunction``;
    the duplicate has been removed.
    """
    def send_jira_message(self, action):
        # Post the named fixture to the JIRA webhook endpoint and return the
        # message delivered to the "jira" stream.
        email = "hamlet@zulip.com"
        api_key = self.get_api_key(email)
        url = "/api/v1/external/jira?api_key=%s" % (api_key,)
        return self.send_json_payload(email,
                                      url,
                                      self.fixture_data('jira', action),
                                      stream_name="jira",
                                      content_type="application/json")
    def test_unknown(self):
        # Unrecognized JIRA event types must be rejected with a JSON error.
        email = "hamlet@zulip.com"
        api_key = self.get_api_key(email)
        url = "/api/v1/external/jira?api_key=%s" % (api_key,)
        result = self.client.post(url, self.fixture_data('jira', 'unknown'),
                                  stream_name="jira",
                                  content_type="application/json")
        self.assert_json_error(result, 'Unknown JIRA event type')
    def test_custom_stream(self):
        # The "stream" query parameter overrides the default "jira" stream.
        email = "hamlet@zulip.com"
        api_key = self.get_api_key(email)
        action = 'created'
        url = "/api/v1/external/jira?api_key=%s&stream=jira_custom" % (api_key,)
        msg = self.send_json_payload(email, url,
                                     self.fixture_data('jira', action),
                                     stream_name="jira_custom",
                                     content_type="application/json")
        self.assertEqual(msg.subject, "BUG-15: New bug with hook")
        self.assertEqual(msg.content, """Leo Franchi **created** [BUG-15](http://lfranchi.com:8080/browse/BUG-15) priority Major, assigned to **no one**:
> New bug with hook""")
    def test_created(self):
        msg = self.send_jira_message('created')
        self.assertEqual(msg.subject, "BUG-15: New bug with hook")
        self.assertEqual(msg.content, """Leo Franchi **created** [BUG-15](http://lfranchi.com:8080/browse/BUG-15) priority Major, assigned to **no one**:
> New bug with hook""")
    def test_created_assignee(self):
        msg = self.send_jira_message('created_assignee')
        self.assertEqual(msg.subject, "TEST-4: Test Created Assignee")
        self.assertEqual(msg.content, """Leonardo Franchi [Administrator] **created** [TEST-4](https://zulipp.atlassian.net/browse/TEST-4) priority Major, assigned to **Leonardo Franchi [Administrator]**:
> Test Created Assignee""")
    def test_commented(self):
        msg = self.send_jira_message('commented')
        self.assertEqual(msg.subject, "BUG-15: New bug with hook")
        self.assertEqual(msg.content, """Leo Franchi **updated** [BUG-15](http://lfranchi.com:8080/browse/BUG-15) (assigned to @**Othello, the Moor of Venice**):
Adding a comment. Oh, what a comment it is!
""")
    def test_commented_markup(self):
        # JIRA wiki markup in the comment must be converted to Markdown.
        msg = self.send_jira_message('commented_markup')
        self.assertEqual(msg.subject, "TEST-7: Testing of rich text")
        self.assertEqual(msg.content, """Leonardo Franchi [Administrator] **updated** [TEST-7](https://zulipp.atlassian.net/browse/TEST-7):\n\n\nThis is a comment that likes to **exercise** a lot of _different_ `conventions` that `jira uses`.\r\n\r\n~~~\n\r\nthis code is not highlighted, but monospaced\r\n\n~~~\r\n\r\n~~~\n\r\ndef python():\r\n    print "likes to be formatted"\r\n\n~~~\r\n\r\n[http://www.google.com](http://www.google.com) is a bare link, and [Google](http://www.google.com) is given a title.\r\n\r\nThanks!\r\n\r\n~~~ quote\n\r\nSomeone said somewhere\r\n\n~~~\n""")
    def test_deleted(self):
        msg = self.send_jira_message('deleted')
        self.assertEqual(msg.subject, "BUG-15: New bug with hook")
        self.assertEqual(msg.content, "Leo Franchi **deleted** [BUG-15](http://lfranchi.com:8080/browse/BUG-15)!")
    def test_reassigned(self):
        msg = self.send_jira_message('reassigned')
        self.assertEqual(msg.subject, "BUG-15: New bug with hook")
        self.assertEqual(msg.content, """Leo Franchi **updated** [BUG-15](http://lfranchi.com:8080/browse/BUG-15) (assigned to @**Othello, the Moor of Venice**):
* Changed assignee from **None** to @**Othello, the Moor of Venice**
""")
    def test_reopened(self):
        msg = self.send_jira_message('reopened')
        self.assertEqual(msg.subject, "BUG-7: More cowbell polease")
        self.assertEqual(msg.content, """Leo Franchi **updated** [BUG-7](http://lfranchi.com:8080/browse/BUG-7) (assigned to @**Othello, the Moor of Venice**):
* Changed resolution from **Fixed** to **None**
* Changed status from **Resolved** to **Reopened**
Re-opened yeah!
""")
    def test_resolved(self):
        msg = self.send_jira_message('resolved')
        self.assertEqual(msg.subject, "BUG-13: Refreshing the page loses the user's current posi...")
        self.assertEqual(msg.content, """Leo Franchi **updated** [BUG-13](http://lfranchi.com:8080/browse/BUG-13) (assigned to @**Othello, the Moor of Venice**):
* Changed status from **Open** to **Resolved**
* Changed assignee from **None** to @**Othello, the Moor of Venice**
* Changed resolution from **None** to **Fixed**
Fixed it, finally!
""")
    def test_workflow_postfunction(self):
        msg = self.send_jira_message('postfunction_hook')
        self.assertEqual(msg.subject, "TEST-5: PostTest")
        self.assertEqual(msg.content, """Leo Franchi [Administrator] **transitioned** [TEST-5](https://lfranchi-test.atlassian.net/browse/TEST-5) from Resolved to Reopened""")
    def test_workflow_postfunction_started(self):
        msg = self.send_jira_message('postfunction_started')
        self.assertEqual(msg.subject, "TEST-7: Gluttony of Post Functions")
        self.assertEqual(msg.content, """Leo Franchi [Administrator] **transitioned** [TEST-7](https://lfranchi-test.atlassian.net/browse/TEST-7) from Open to Underway""")
    def test_workflow_postfunction_resolved(self):
        msg = self.send_jira_message('postfunction_resolved')
        self.assertEqual(msg.subject, "TEST-7: Gluttony of Post Functions")
        self.assertEqual(msg.content, """Leo Franchi [Administrator] **transitioned** [TEST-7](https://lfranchi-test.atlassian.net/browse/TEST-7) from Open to Resolved""")
    def test_mention(self):
        msg = self.send_jira_message('watch_mention_updated')
        self.assertEqual(msg.subject, "TEST-5: Lunch Decision Needed")
        self.assertEqual(msg.content, """Leonardo Franchi [Administrator] **updated** [TEST-5](https://zulipp.atlassian.net/browse/TEST-5) (assigned to @**Othello, the Moor of Venice**):
Making a comment, @**Othello, the Moor of Venice** is watching this issue
""")
    def test_priority_updated(self):
        msg = self.send_jira_message('updated_priority')
        self.assertEqual(msg.subject, "TEST-1: Fix That")
        self.assertEqual(msg.content, """Leonardo Franchi [Administrator] **updated** [TEST-1](https://zulipp.atlassian.net/browse/TEST-1) (assigned to **leo@zulip.com**):
* Changed priority from **Critical** to **Major**
""")
class BeanstalkHookTests(AuthedTestCase):
    """Tests for the Beanstalk (git/svn hosting) webhook integration."""
    def send_beanstalk_message(self, action):
        # Post the named Beanstalk fixture and return the message delivered
        # to the "commits" stream.
        email = "hamlet@zulip.com"
        data = {'payload': self.fixture_data('beanstalk', action)}
        return self.send_json_payload(email, "/api/v1/external/beanstalk",
                                      data,
                                      stream_name="commits",
                                      **self.api_auth(email))
    def test_git_single(self):
        # A single-commit git push yields a one-line commit summary.
        msg = self.send_beanstalk_message('git_singlecommit')
        self.assertEqual(msg.subject, "work-test")
        self.assertEqual(msg.content, """Leo Franchi [pushed](http://lfranchi-svn.beanstalkapp.com/work-test) to branch master
* [e50508d](http://lfranchi-svn.beanstalkapp.com/work-test/changesets/e50508df): add some stuff
""")
    @slow(0.20, "lots of queries")
    def test_git_multiple(self):
        # A multi-commit push lists each commit on its own line.
        msg = self.send_beanstalk_message('git_multiple')
        self.assertEqual(msg.subject, "work-test")
        self.assertEqual(msg.content, """Leo Franchi [pushed](http://lfranchi-svn.beanstalkapp.com/work-test) to branch master
* [edf529c](http://lfranchi-svn.beanstalkapp.com/work-test/changesets/edf529c7): Added new file
* [c2a191b](http://lfranchi-svn.beanstalkapp.com/work-test/changesets/c2a191b9): Filled in new file with some stuff
* [2009815](http://lfranchi-svn.beanstalkapp.com/work-test/changesets/20098158): More work to fix some bugs
""")
    def test_svn_addremove(self):
        # SVN events are reported per-revision rather than per-branch.
        msg = self.send_beanstalk_message('svn_addremove')
        self.assertEqual(msg.subject, "svn r3")
        self.assertEqual(msg.content, """Leo Franchi pushed [revision 3](http://lfranchi-svn.beanstalkapp.com/work-test/changesets/3):
> Removed a file and added another one!""")
    def test_svn_changefile(self):
        msg = self.send_beanstalk_message('svn_changefile')
        self.assertEqual(msg.subject, "svn r2")
        self.assertEqual(msg.content, """Leo Franchi pushed [revision 2](http://lfranchi-svn.beanstalkapp.com/work-test/changesets/2):
> Added some code""")
class GithubV1HookTests(AuthedTestCase):
    """Tests for version 1 of the GitHub webhook payload format."""
    # Expected Zulip message body for the canned v1_push fixture.
    push_content = """zbenjamin [pushed](https://github.com/zbenjamin/zulip-test/compare/4f9adc4777d5...b95449196980) to branch master
* [48c329a](https://github.com/zbenjamin/zulip-test/commit/48c329a0b68a9a379ff195ee3f1c1f4ab0b2a89e): Add baz
* [06ebe5f](https://github.com/zbenjamin/zulip-test/commit/06ebe5f472a32f6f31fd2a665f0c7442b69cce72): Baz needs to be longer
* [b954491](https://github.com/zbenjamin/zulip-test/commit/b95449196980507f08209bdfdc4f1d611689b7a8): Final edit to baz, I swear
"""
    def test_spam_branch_is_ignored(self):
        # Pushes to branches outside the user's "branches" filter must not
        # produce any message.
        email = "hamlet@zulip.com"
        api_key = self.get_api_key(email)
        stream = 'commits'
        data = ujson.loads(self.fixture_data('github', 'v1_push'))
        data.update({'email': email,
                     'api-key': api_key,
                     'branches': 'dev,staging',
                     'stream': stream,
                     'payload': ujson.dumps(data['payload'])})
        url = '/api/v1/external/github'
        # We subscribe to the stream in this test, even though
        # it won't get written, to avoid failing for the wrong
        # reason.
        self.subscribe_to_stream(email, stream)
        prior_count = Message.objects.count()
        result = self.client.post(url, data)
        self.assert_json_success(result)
        after_count = Message.objects.count()
        self.assertEqual(prior_count, after_count)
    def basic_test(self, fixture_name, stream_name, expected_subject, expected_content, send_stream=False, branches=None):
        # Shared driver: post the named v1 fixture (optionally with an
        # explicit stream and/or branch filter) and compare the resulting
        # message's subject and content against the expectations.
        email = "hamlet@zulip.com"
        api_key = self.get_api_key(email)
        data = ujson.loads(self.fixture_data('github', 'v1_' + fixture_name))
        data.update({'email': email,
                     'api-key': api_key,
                     'payload': ujson.dumps(data['payload'])})
        if send_stream:
            data['stream'] = stream_name
        if branches is not None:
            data['branches'] = branches
        msg = self.send_json_payload(email, "/api/v1/external/github",
                                     data,
                                     stream_name=stream_name)
        self.assertEqual(msg.subject, expected_subject)
        self.assertEqual(msg.content, expected_content)
    def test_user_specified_branches(self):
        self.basic_test('push', 'my_commits', 'zulip-test', self.push_content,
                        send_stream=True, branches="master,staging")
    def test_user_specified_stream(self):
        # Around May 2013 the github webhook started to specify the stream.
        # Before then, the stream was hard coded to "commits".
        self.basic_test('push', 'my_commits', 'zulip-test', self.push_content,
                        send_stream=True)
    def test_legacy_hook(self):
        self.basic_test('push', 'commits', 'zulip-test', self.push_content)
    def test_issues_opened(self):
        self.basic_test('issues_opened', 'issues',
                        "zulip-test: issue 5: The frobnicator doesn't work",
                        "zbenjamin opened [issue 5](https://github.com/zbenjamin/zulip-test/issues/5)\n\n~~~ quote\nI tried changing the widgets, but I got:\r\n\r\nPermission denied: widgets are immutable\n~~~")
    def test_issue_comment(self):
        self.basic_test('issue_comment', 'issues',
                        "zulip-test: issue 5: The frobnicator doesn't work",
                        "zbenjamin [commented](https://github.com/zbenjamin/zulip-test/issues/5#issuecomment-23374280) on [issue 5](https://github.com/zbenjamin/zulip-test/issues/5)\n\n~~~ quote\nWhoops, I did something wrong.\r\n\r\nI'm sorry.\n~~~")
    def test_issues_closed(self):
        self.basic_test('issues_closed', 'issues',
                        "zulip-test: issue 5: The frobnicator doesn't work",
                        "zbenjamin closed [issue 5](https://github.com/zbenjamin/zulip-test/issues/5)")
    def test_pull_request_opened(self):
        self.basic_test('pull_request_opened', 'commits',
                        "zulip-test: pull request 7: Counting is hard.",
                        "lfaraone opened [pull request 7](https://github.com/zbenjamin/zulip-test/pull/7)\n\n~~~ quote\nOmitted something I think?\n~~~")
    def test_pull_request_closed(self):
        self.basic_test('pull_request_closed', 'commits',
                        "zulip-test: pull request 7: Counting is hard.",
                        "zbenjamin closed [pull request 7](https://github.com/zbenjamin/zulip-test/pull/7)")
    def test_pull_request_synchronize(self):
        self.basic_test('pull_request_synchronize', 'commits',
                        "zulip-test: pull request 13: Even more cowbell.",
                        "zbenjamin synchronized [pull request 13](https://github.com/zbenjamin/zulip-test/pull/13)")
    def test_pull_request_comment(self):
        self.basic_test('pull_request_comment', 'commits',
                        "zulip-test: pull request 9: Less cowbell.",
                        "zbenjamin [commented](https://github.com/zbenjamin/zulip-test/pull/9#issuecomment-24771110) on [pull request 9](https://github.com/zbenjamin/zulip-test/pull/9)\n\n~~~ quote\nYeah, who really needs more cowbell than we already have?\n~~~")
    def test_pull_request_comment_user_specified_stream(self):
        self.basic_test('pull_request_comment', 'my_commits',
                        "zulip-test: pull request 9: Less cowbell.",
                        "zbenjamin [commented](https://github.com/zbenjamin/zulip-test/pull/9#issuecomment-24771110) on [pull request 9](https://github.com/zbenjamin/zulip-test/pull/9)\n\n~~~ quote\nYeah, who really needs more cowbell than we already have?\n~~~",
                        send_stream=True)
    def test_commit_comment(self):
        self.basic_test('commit_comment', 'commits',
                        "zulip-test: commit 7c994678d2f98797d299abed852d3ff9d0834533",
                        "zbenjamin [commented](https://github.com/zbenjamin/zulip-test/commit/7c994678d2f98797d299abed852d3ff9d0834533#commitcomment-4252302)\n\n~~~ quote\nAre we sure this is enough cowbell?\n~~~")
    def test_commit_comment_line(self):
        self.basic_test('commit_comment_line', 'commits',
                        "zulip-test: commit 7c994678d2f98797d299abed852d3ff9d0834533",
                        "zbenjamin [commented](https://github.com/zbenjamin/zulip-test/commit/7c994678d2f98797d299abed852d3ff9d0834533#commitcomment-4252307) on `cowbell`, line 13\n\n~~~ quote\nThis line adds /unlucky/ cowbell (because of its line number). We should remove it.\n~~~")
class GithubV2HookTests(AuthedTestCase):
    """End-to-end tests for the GitHub webhook using v2-format payloads.

    Each test POSTs a recorded fixture to /api/v1/external/github and
    verifies the subject and content of the resulting Zulip message.
    """
    # Expected message body for the canned "v2_push" fixture, shared by the
    # push-related tests below.
    push_content = """zbenjamin [pushed](https://github.com/zbenjamin/zulip-test/compare/4f9adc4777d5...b95449196980) to branch master
* [48c329a](https://github.com/zbenjamin/zulip-test/commit/48c329a0b68a9a379ff195ee3f1c1f4ab0b2a89e): Add baz
* [06ebe5f](https://github.com/zbenjamin/zulip-test/commit/06ebe5f472a32f6f31fd2a665f0c7442b69cce72): Baz needs to be longer
* [b954491](https://github.com/zbenjamin/zulip-test/commit/b95449196980507f08209bdfdc4f1d611689b7a8): Final edit to baz, I swear
"""
    def test_spam_branch_is_ignored(self):
        # A push to a branch outside the user's 'branches' filter must not
        # generate any message at all.
        email = "hamlet@zulip.com"
        api_key = self.get_api_key(email)
        stream = 'commits'
        data = ujson.loads(self.fixture_data('github', 'v2_push'))
        data.update({'email': email,
                     'api-key': api_key,
                     'branches': 'dev,staging',
                     'stream': stream,
                     'payload': ujson.dumps(data['payload'])})
        url = '/api/v1/external/github'
        # We subscribe to the stream in this test, even though
        # it won't get written, to avoid failing for the wrong
        # reason.
        self.subscribe_to_stream(email, stream)
        prior_count = Message.objects.count()
        result = self.client.post(url, data)
        self.assert_json_success(result)
        after_count = Message.objects.count()
        self.assertEqual(prior_count, after_count)
    def basic_test(self, fixture_name, stream_name, expected_subject, expected_content, send_stream=False, branches=None):
        """POST the named v2 fixture and assert the subject and content of
        the resulting message; optionally pass an explicit stream/branches."""
        email = "hamlet@zulip.com"
        api_key = self.get_api_key(email)
        data = ujson.loads(self.fixture_data('github', 'v2_' + fixture_name))
        data.update({'email': email,
                     'api-key': api_key,
                     'payload': ujson.dumps(data['payload'])})
        if send_stream:
            data['stream'] = stream_name
        if branches is not None:
            data['branches'] = branches
        msg = self.send_json_payload(email, "/api/v1/external/github",
                                     data,
                                     stream_name=stream_name)
        self.assertEqual(msg.subject, expected_subject)
        self.assertEqual(msg.content, expected_content)
    def test_user_specified_branches(self):
        self.basic_test('push', 'my_commits', 'zulip-test', self.push_content,
                        send_stream=True, branches="master,staging")
    def test_user_specified_stream(self):
        # Around May 2013 the github webhook started to specify the stream.
        # Before then, the stream was hard coded to "commits".
        self.basic_test('push', 'my_commits', 'zulip-test', self.push_content,
                        send_stream=True)
    def test_legacy_hook(self):
        self.basic_test('push', 'commits', 'zulip-test', self.push_content)
    def test_issues_opened(self):
        self.basic_test('issues_opened', 'issues',
                        "zulip-test: issue 5: The frobnicator doesn't work",
                        "zbenjamin opened [issue 5](https://github.com/zbenjamin/zulip-test/issues/5)\n\n~~~ quote\nI tried changing the widgets, but I got:\r\n\r\nPermission denied: widgets are immutable\n~~~")
    def test_issue_comment(self):
        self.basic_test('issue_comment', 'issues',
                        "zulip-test: issue 5: The frobnicator doesn't work",
                        "zbenjamin [commented](https://github.com/zbenjamin/zulip-test/issues/5#issuecomment-23374280) on [issue 5](https://github.com/zbenjamin/zulip-test/issues/5)\n\n~~~ quote\nWhoops, I did something wrong.\r\n\r\nI'm sorry.\n~~~")
    def test_issues_closed(self):
        self.basic_test('issues_closed', 'issues',
                        "zulip-test: issue 5: The frobnicator doesn't work",
                        "zbenjamin closed [issue 5](https://github.com/zbenjamin/zulip-test/issues/5)")
    def test_pull_request_opened(self):
        self.basic_test('pull_request_opened', 'commits',
                        "zulip-test: pull request 7: Counting is hard.",
                        "lfaraone opened [pull request 7](https://github.com/zbenjamin/zulip-test/pull/7)\n\n~~~ quote\nOmitted something I think?\n~~~")
    def test_pull_request_closed(self):
        self.basic_test('pull_request_closed', 'commits',
                        "zulip-test: pull request 7: Counting is hard.",
                        "zbenjamin closed [pull request 7](https://github.com/zbenjamin/zulip-test/pull/7)")
    def test_pull_request_synchronize(self):
        self.basic_test('pull_request_synchronize', 'commits',
                        "zulip-test: pull request 13: Even more cowbell.",
                        "zbenjamin synchronized [pull request 13](https://github.com/zbenjamin/zulip-test/pull/13)")
    def test_pull_request_comment(self):
        self.basic_test('pull_request_comment', 'commits',
                        "zulip-test: pull request 9: Less cowbell.",
                        "zbenjamin [commented](https://github.com/zbenjamin/zulip-test/pull/9#issuecomment-24771110) on [pull request 9](https://github.com/zbenjamin/zulip-test/pull/9)\n\n~~~ quote\nYeah, who really needs more cowbell than we already have?\n~~~")
    def test_pull_request_comment_user_specified_stream(self):
        self.basic_test('pull_request_comment', 'my_commits',
                        "zulip-test: pull request 9: Less cowbell.",
                        "zbenjamin [commented](https://github.com/zbenjamin/zulip-test/pull/9#issuecomment-24771110) on [pull request 9](https://github.com/zbenjamin/zulip-test/pull/9)\n\n~~~ quote\nYeah, who really needs more cowbell than we already have?\n~~~",
                        send_stream=True)
    def test_commit_comment(self):
        self.basic_test('commit_comment', 'commits',
                        "zulip-test: commit 7c994678d2f98797d299abed852d3ff9d0834533",
                        "zbenjamin [commented](https://github.com/zbenjamin/zulip-test/commit/7c994678d2f98797d299abed852d3ff9d0834533#commitcomment-4252302)\n\n~~~ quote\nAre we sure this is enough cowbell?\n~~~")
    def test_commit_comment_line(self):
        self.basic_test('commit_comment_line', 'commits',
                        "zulip-test: commit 7c994678d2f98797d299abed852d3ff9d0834533",
                        "zbenjamin [commented](https://github.com/zbenjamin/zulip-test/commit/7c994678d2f98797d299abed852d3ff9d0834533#commitcomment-4252307) on `cowbell`, line 13\n\n~~~ quote\nThis line adds /unlucky/ cowbell (because of its line number). We should remove it.\n~~~")
class PivotalV3HookTests(AuthedTestCase):
    """Tests for the Pivotal Tracker webhook, v3 (XML) payload format."""
    def send_pivotal_message(self, name):
        """POST the named XML fixture to the Pivotal webhook endpoint and
        return the Zulip message it generated."""
        email = "hamlet@zulip.com"
        api_key = self.get_api_key(email)
        return self.send_json_payload(email, "/api/v1/external/pivotal?api_key=%s&stream=%s" % (api_key,"pivotal"),
                                      self.fixture_data('pivotal', name, file_type='xml'),
                                      stream_name="pivotal",
                                      content_type="application/xml")
    def test_accepted(self):
        msg = self.send_pivotal_message('accepted')
        self.assertEqual(msg.subject, 'My new Feature story')
        self.assertEqual(msg.content, 'Leo Franchi accepted "My new Feature story" \
[(view)](https://www.pivotaltracker.com/s/projects/807213/stories/48276573)')
    def test_commented(self):
        msg = self.send_pivotal_message('commented')
        self.assertEqual(msg.subject, 'Comment added')
        self.assertEqual(msg.content, 'Leo Franchi added comment: "FIX THIS NOW" \
[(view)](https://www.pivotaltracker.com/s/projects/807213/stories/48276573)')
    def test_created(self):
        msg = self.send_pivotal_message('created')
        self.assertEqual(msg.subject, 'My new Feature story')
        self.assertEqual(msg.content, 'Leo Franchi added "My new Feature story" \
(unscheduled feature):\n\n~~~ quote\nThis is my long description\n~~~\n\n \
[(view)](https://www.pivotaltracker.com/s/projects/807213/stories/48276573)')
    def test_delivered(self):
        msg = self.send_pivotal_message('delivered')
        self.assertEqual(msg.subject, 'Another new story')
        self.assertEqual(msg.content, 'Leo Franchi delivered "Another new story" \
[(view)](https://www.pivotaltracker.com/s/projects/807213/stories/48278289)')
    def test_finished(self):
        msg = self.send_pivotal_message('finished')
        self.assertEqual(msg.subject, 'Another new story')
        self.assertEqual(msg.content, 'Leo Franchi finished "Another new story" \
[(view)](https://www.pivotaltracker.com/s/projects/807213/stories/48278289)')
    def test_moved(self):
        msg = self.send_pivotal_message('moved')
        self.assertEqual(msg.subject, 'My new Feature story')
        self.assertEqual(msg.content, 'Leo Franchi edited "My new Feature story" \
[(view)](https://www.pivotaltracker.com/s/projects/807213/stories/48276573)')
    def test_rejected(self):
        msg = self.send_pivotal_message('rejected')
        self.assertEqual(msg.subject, 'Another new story')
        self.assertEqual(msg.content, 'Leo Franchi rejected "Another new story" with comments: \
"Not good enough, sorry" [(view)](https://www.pivotaltracker.com/s/projects/807213/stories/48278289)')
    def test_started(self):
        msg = self.send_pivotal_message('started')
        self.assertEqual(msg.subject, 'Another new story')
        self.assertEqual(msg.content, 'Leo Franchi started "Another new story" \
[(view)](https://www.pivotaltracker.com/s/projects/807213/stories/48278289)')
    def test_created_estimate(self):
        msg = self.send_pivotal_message('created_estimate')
        self.assertEqual(msg.subject, 'Another new story')
        self.assertEqual(msg.content, 'Leo Franchi added "Another new story" \
(unscheduled feature worth 2 story points):\n\n~~~ quote\nSome loong description\n~~~\n\n \
[(view)](https://www.pivotaltracker.com/s/projects/807213/stories/48278289)')
    def test_type_changed(self):
        msg = self.send_pivotal_message('type_changed')
        self.assertEqual(msg.subject, 'My new Feature story')
        self.assertEqual(msg.content, 'Leo Franchi edited "My new Feature story" \
[(view)](https://www.pivotaltracker.com/s/projects/807213/stories/48276573)')
class PivotalV5HookTests(AuthedTestCase):
    """Tests for the Pivotal Tracker webhook, v5 (JSON) payload format."""
    def send_pivotal_message(self, name):
        """POST the named v5 JSON fixture to the Pivotal webhook endpoint and
        return the Zulip message it generated."""
        email = "hamlet@zulip.com"
        api_key = self.get_api_key(email)
        # NOTE(review): the fixture is JSON but the request is sent with
        # content_type="application/xml" (copied from the v3 tests). The
        # webhook apparently accepts it anyway -- confirm before changing.
        return self.send_json_payload(email, "/api/v1/external/pivotal?api_key=%s&stream=%s" % (api_key,"pivotal"),
                                      self.fixture_data('pivotal', "v5_" + name, file_type='json'),
                                      stream_name="pivotal",
                                      content_type="application/xml")
    def test_accepted(self):
        msg = self.send_pivotal_message('accepted')
        self.assertEqual(msg.subject, '#63486316: Story of the Year')
        self.assertEqual(msg.content, """Leo Franchi updated [Hard Code](https://www.pivotaltracker.com/s/projects/807213): [Story of the Year](http://www.pivotaltracker.com/story/show/63486316):
* state changed from **unstarted** to **accepted**
""")
    def test_commented(self):
        msg = self.send_pivotal_message('commented')
        self.assertEqual(msg.subject, '#63486316: Story of the Year')
        self.assertEqual(msg.content, """Leo Franchi added a comment to [Hard Code](https://www.pivotaltracker.com/s/projects/807213): [Story of the Year](http://www.pivotaltracker.com/story/show/63486316):
~~~quote
A comment on the story
~~~""")
    def test_created(self):
        msg = self.send_pivotal_message('created')
        self.assertEqual(msg.subject, '#63495662: Story that I created')
        self.assertEqual(msg.content, """Leo Franchi created bug: [Hard Code](https://www.pivotaltracker.com/s/projects/807213): [Story that I created](http://www.pivotaltracker.com/story/show/63495662)
* State is **unscheduled**
* Description is
> What a description""")
    def test_delivered(self):
        msg = self.send_pivotal_message('delivered')
        self.assertEqual(msg.subject, '#63486316: Story of the Year')
        self.assertEqual(msg.content, """Leo Franchi updated [Hard Code](https://www.pivotaltracker.com/s/projects/807213): [Story of the Year](http://www.pivotaltracker.com/story/show/63486316):
* state changed from **accepted** to **delivered**
""")
    def test_finished(self):
        msg = self.send_pivotal_message('finished')
        self.assertEqual(msg.subject, '#63486316: Story of the Year')
        self.assertEqual(msg.content, """Leo Franchi updated [Hard Code](https://www.pivotaltracker.com/s/projects/807213): [Story of the Year](http://www.pivotaltracker.com/story/show/63486316):
* state changed from **delivered** to **accepted**
""")
    def test_moved(self):
        msg = self.send_pivotal_message('moved')
        self.assertEqual(msg.subject, '#63496066: Pivotal Test')
        self.assertEqual(msg.content, """Leo Franchi moved [Hard Code](https://www.pivotaltracker.com/s/projects/807213): [Pivotal Test](http://www.pivotaltracker.com/story/show/63496066) from **unstarted** to **unscheduled**""")
    def test_rejected(self):
        msg = self.send_pivotal_message('rejected')
        self.assertEqual(msg.subject, '#63486316: Story of the Year')
        self.assertEqual(msg.content, """Leo Franchi updated [Hard Code](https://www.pivotaltracker.com/s/projects/807213): [Story of the Year](http://www.pivotaltracker.com/story/show/63486316):
* Comment added:
~~~quote
Try again next time
~~~
* state changed from **delivered** to **rejected**
""")
    def test_started(self):
        msg = self.send_pivotal_message('started')
        self.assertEqual(msg.subject, '#63495972: Fresh Story')
        self.assertEqual(msg.content, """Leo Franchi updated [Hard Code](https://www.pivotaltracker.com/s/projects/807213): [Fresh Story](http://www.pivotaltracker.com/story/show/63495972):
* state changed from **unstarted** to **started**
""")
    def test_created_estimate(self):
        msg = self.send_pivotal_message('created_estimate')
        self.assertEqual(msg.subject, '#63496066: Pivotal Test')
        self.assertEqual(msg.content, """Leo Franchi updated [Hard Code](https://www.pivotaltracker.com/s/projects/807213): [Pivotal Test](http://www.pivotaltracker.com/story/show/63496066):
* estimate is now **3 points**
""")
    def test_type_changed(self):
        msg = self.send_pivotal_message('type_changed')
        self.assertEqual(msg.subject, '#63496066: Pivotal Test')
        self.assertEqual(msg.content, """Leo Franchi updated [Hard Code](https://www.pivotaltracker.com/s/projects/807213): [Pivotal Test](http://www.pivotaltracker.com/story/show/63496066):
* estimate changed from 3 to **0 points**
* type changed from **feature** to **bug**
""")
class NewRelicHookTests(AuthedTestCase):
    """Tests for the New Relic webhook integration."""
    def send_new_relic_message(self, name):
        """POST the named New Relic fixture and return the resulting Message."""
        email = "hamlet@zulip.com"
        hook_url = "/api/v1/external/newrelic?api_key=%s&stream=%s" % (self.get_api_key(email), "newrelic")
        return self.send_json_payload(email, hook_url,
                                      self.fixture_data('newrelic', name, file_type='txt'),
                                      stream_name="newrelic",
                                      content_type="application/x-www-form-urlencoded")
    def test_alert(self):
        msg = self.send_new_relic_message('alert')
        expected = ('Alert opened on [application name]: '
                    'Apdex score fell below critical level of 0.90\n'
                    '[View alert](https://rpm.newrelc.com/accounts/[account_id]/applications/[application_id]/incidents/[incident_id])')
        self.assertEqual(msg.subject, "Apdex score fell below critical level of 0.90")
        self.assertEqual(msg.content, expected)
    def test_deployment(self):
        msg = self.send_new_relic_message('deployment')
        expected = ('`1242` deployed by **Zulip Test**\n'
                    'Description sent via curl\n\nChangelog string')
        self.assertEqual(msg.subject, 'Test App deploy')
        self.assertEqual(msg.content, expected)
class StashHookTests(AuthedTestCase):
    """Tests for the Atlassian Stash webhook integration."""
    def test_stash_message(self):
        """
        Messages are generated by Stash on a `git push`.
        The subject describes the repo and Stash "project". The
        content describes the commits pushed.
        """
        email = "hamlet@zulip.com"
        payload = self.fixture_data("stash", "push", file_type="json")
        msg = self.send_json_payload(
            email, "/api/v1/external/stash?stream=commits",
            payload,
            stream_name="commits",
            content_type="application/x-www-form-urlencoded",
            **self.api_auth(email))
        expected_content = ("`f259e90` was pushed to **master** in "
                            "**Secret project/Operation unicorn** with:\n"
                            "* `f259e90`: Updating poms ...")
        self.assertEqual(msg.subject, u"Secret project/Operation unicorn: master")
        self.assertEqual(msg.content, expected_content)
class FreshdeskHookTests(AuthedTestCase):
    """Tests for the Freshdesk webhook ("Dispatch'r" and "Observer" callbacks)."""
    def generate_webhook_response(self, fixture):
        """
        Helper function to handle the webhook boilerplate.
        """
        email = "hamlet@zulip.com"
        return self.send_json_payload(
            email, "/api/v1/external/freshdesk?stream=freshdesk",
            self.fixture_data("freshdesk", fixture, file_type="json"),
            stream_name="freshdesk",
            content_type="application/x-www-form-urlencoded",
            **self.api_auth(email))
    def test_ticket_creation(self):
        """
        Messages are generated on ticket creation through Freshdesk's
        "Dispatch'r" service.
        """
        msg = self.generate_webhook_response("ticket_created")
        self.assertEqual(msg.subject, u"#11: Test ticket subject ☃")
        self.assertEqual(msg.content, u"""Requester ☃ Bob <requester-bob@example.com> created [ticket #11](http://test1234zzz.freshdesk.com/helpdesk/tickets/11):
~~~ quote
Test ticket description ☃.
~~~
Type: **Incident**
Priority: **High**
Status: **Pending**""")
    def test_status_change(self):
        """
        Messages are generated when a ticket's status changes through
        Freshdesk's "Observer" service.
        """
        msg = self.generate_webhook_response("status_changed")
        self.assertEqual(msg.subject, u"#11: Test ticket subject ☃")
        self.assertEqual(msg.content, """Requester Bob <requester-bob@example.com> updated [ticket #11](http://test1234zzz.freshdesk.com/helpdesk/tickets/11):
Status: **Resolved** => **Waiting on Customer**""")
    def test_priority_change(self):
        """
        Messages are generated when a ticket's priority changes through
        Freshdesk's "Observer" service.
        """
        msg = self.generate_webhook_response("priority_changed")
        self.assertEqual(msg.subject, u"#11: Test ticket subject")
        self.assertEqual(msg.content, """Requester Bob <requester-bob@example.com> updated [ticket #11](http://test1234zzz.freshdesk.com/helpdesk/tickets/11):
Priority: **High** => **Low**""")
    def note_change(self, fixture, note_type):
        """
        Messages are generated when a note gets added to a ticket through
        Freshdesk's "Observer" service.
        """
        msg = self.generate_webhook_response(fixture)
        self.assertEqual(msg.subject, u"#11: Test ticket subject")
        self.assertEqual(msg.content, """Requester Bob <requester-bob@example.com> added a %s note to [ticket #11](http://test1234zzz.freshdesk.com/helpdesk/tickets/11).""" % (note_type,))
    def test_private_note_change(self):
        self.note_change("private_note", "private")
    def test_public_note_change(self):
        self.note_change("public_note", "public")
    def test_inline_image(self):
        """
        Freshdesk sends us descriptions as HTML, so we have to make the
        descriptions Zulip markdown-friendly while still doing our best to
        preserve links and images.
        """
        msg = self.generate_webhook_response("inline_images")
        self.assertEqual(msg.subject, u"#12: Not enough ☃ guinea pigs")
        self.assertIn("[guinea_pig.png](http://cdn.freshdesk.com/data/helpdesk/attachments/production/12744808/original/guinea_pig.png)", msg.content)
class ZenDeskHookTests(AuthedTestCase):
    """Tests for the ZenDesk webhook integration."""
    def generate_webhook_response(self, ticket_title='User can\'t login',
                                  ticket_id=54, message='Message',
                                  stream_name='zendesk'):
        """POST a ZenDesk payload to the webhook and return the resulting
        Zulip message (the most recently sent one)."""
        email = 'hamlet@zulip.com'
        self.subscribe_to_stream(email, stream_name)
        payload = {
            'ticket_title': ticket_title,
            'ticket_id': ticket_id,
            'message': message,
            'stream': stream_name,
        }
        result = self.client.post('/api/v1/external/zendesk', payload,
                                  **self.api_auth(email))
        self.assert_json_success(result)
        # The newest row in the Message table should be what the hook sent.
        msg = Message.objects.filter().order_by('-id')[0]
        self.assertEqual(msg.sender.email, email)
        return msg
    def test_subject(self):
        msg = self.generate_webhook_response(ticket_id=4, ticket_title="Test ticket")
        self.assertEqual(msg.subject, '#4: Test ticket')
    def test_long_subject(self):
        # Over-long ticket titles get truncated with a "..." suffix.
        msg = self.generate_webhook_response(ticket_id=4, ticket_title="Test ticket" + '!' * 80)
        self.assertEqual(msg.subject, '#4: Test ticket' + '!' * 42 + '...')
    def test_content(self):
        msg = self.generate_webhook_response(message='New comment:\n> It is better\n* here')
        self.assertEqual(msg.content, 'New comment:\n> It is better\n* here')
class PagerDutyHookTests(AuthedTestCase):
    """Tests for the PagerDuty webhook integration."""
    def send_webhook(self, data, stream_name, topic=None):
        """POST `data` (serialized as JSON) to the PagerDuty webhook endpoint
        and return the most recently sent Zulip message."""
        email = 'hamlet@zulip.com'
        self.subscribe_to_stream(email, stream_name)
        api_key = self.get_api_key(email)
        if topic:
            url = '/api/v1/external/pagerduty?api_key=%s&stream=%s&topic=%s' % (api_key, stream_name, topic)
        else:
            url = '/api/v1/external/pagerduty?api_key=%s&stream=%s' % (api_key, stream_name)
        result = self.client.post(url, ujson.dumps(data), content_type="application/json")
        self.assert_json_success(result)
        # Check the correct message was sent
        msg = Message.objects.filter().order_by('-id')[0]
        self.assertEqual(msg.sender.email, email)
        return msg
    def test_trigger(self):
        data = ujson.loads(self.fixture_data('pagerduty', 'trigger'))
        msg = self.send_webhook(data, 'pagerduty')
        self.assertEqual(msg.subject, 'incident 3')
        self.assertEqual(
            msg.content,
            ':unhealthy_heart: Incident [3](https://zulip-test.pagerduty.com/incidents/P140S4Y) triggered by [Test service](https://zulip-test.pagerduty.com/services/PIL5CUQ) and assigned to [armooo@](https://zulip-test.pagerduty.com/users/POBCFRJ)\n\n>foo'
        )
    def test_unacknowledge(self):
        data = ujson.loads(self.fixture_data('pagerduty', 'unacknowledge'))
        msg = self.send_webhook(data, 'pagerduty')
        self.assertEqual(msg.subject, 'incident 3')
        self.assertEqual(
            msg.content,
            ':unhealthy_heart: Incident [3](https://zulip-test.pagerduty.com/incidents/P140S4Y) unacknowledged by [Test service](https://zulip-test.pagerduty.com/services/PIL5CUQ) and assigned to [armooo@](https://zulip-test.pagerduty.com/users/POBCFRJ)\n\n>foo'
        )
    def test_resolved(self):
        data = ujson.loads(self.fixture_data('pagerduty', 'resolved'))
        msg = self.send_webhook(data, 'pagerduty')
        self.assertEqual(msg.subject, 'incident 1')
        self.assertEqual(
            msg.content,
            ':healthy_heart: Incident [1](https://zulip-test.pagerduty.com/incidents/PO1XIJ5) resolved by [armooo@](https://zulip-test.pagerduty.com/users/POBCFRJ)\n\n>It is on fire'
        )
    def test_auto_resolved(self):
        data = ujson.loads(self.fixture_data('pagerduty', 'auto_resolved'))
        msg = self.send_webhook(data, 'pagerduty')
        self.assertEqual(msg.subject, 'incident 2')
        self.assertEqual(
            msg.content,
            ':healthy_heart: Incident [2](https://zulip-test.pagerduty.com/incidents/PX7K9J2) resolved\n\n>new'
        )
    def test_acknowledge(self):
        data = ujson.loads(self.fixture_data('pagerduty', 'acknowledge'))
        msg = self.send_webhook(data, 'pagerduty')
        self.assertEqual(msg.subject, 'incident 1')
        self.assertEqual(
            msg.content,
            ':average_heart: Incident [1](https://zulip-test.pagerduty.com/incidents/PO1XIJ5) acknowledged by [armooo@](https://zulip-test.pagerduty.com/users/POBCFRJ)\n\n>It is on fire'
        )
    def test_no_subject(self):
        # A payload without a usable subject falls back to "incident <id>".
        data = ujson.loads(self.fixture_data('pagerduty', 'mp_fail'))
        msg = self.send_webhook(data, 'pagerduty')
        self.assertEqual(msg.subject, 'incident 48219')
        self.assertEqual(
            msg.content,
            u':healthy_heart: Incident [48219](https://dropbox.pagerduty.com/incidents/PJKGZF9) resolved\n\n>mp_error_block_down_critical\u2119\u01b4'
        )
    def test_explicit_subject(self):
        # A "topic" query parameter (URL-encoded) overrides the subject.
        data = ujson.loads(self.fixture_data('pagerduty', 'acknowledge'))
        msg = self.send_webhook(data, 'pagerduty', topic="my+cool+topic")
        self.assertEqual(msg.subject, 'my cool topic')
        self.assertEqual(
            msg.content,
            ':average_heart: Incident [1](https://zulip-test.pagerduty.com/incidents/PO1XIJ5) acknowledged by [armooo@](https://zulip-test.pagerduty.com/users/POBCFRJ)\n\n>It is on fire'
        )
    def test_bad_message(self):
        # Malformed payloads are reported verbatim rather than dropped.
        data = {'messages': [{'type': 'incident.triggered'}]}
        msg = self.send_webhook(data, 'pagerduty')
        self.assertEqual(msg.subject, 'pagerduty')
        self.assertEqual(
            msg.content,
            'Unknown pagerduty message\n``` py\n{u\'type\': u\'incident.triggered\'}\n```'
        )
    def test_unknown_message_type(self):
        data = {'messages': [{'type': 'foo'}]}
        msg = self.send_webhook(data, 'pagerduty')
        self.assertEqual(msg.subject, 'pagerduty')
        self.assertEqual(
            msg.content,
            'Unknown pagerduty message\n``` py\n{u\'type\': u\'foo\'}\n```'
        )
|
voberoi/python-magento | refs/heads/master | magento/magento_ipython_shell.py | 1 | #!/usr/bin/env python
import sys
import argparse
from magento import MagentoAPI
try:
from IPython import embed
except:
print "You must have IPython installed to use this shell. Try"
print "'pip install ipython', 'easy_install ipython' , or head"
print "over to ipython.org."
sys.exit(1)
def main():
parser = argparse.ArgumentParser(
description=\
"Launch an IPython shell with a MagentoAPI instance, 'magento' " + \
"connected to a given endpoint.")
parser.add_argument("host", help="The Magento server host.")
parser.add_argument("port", type=int, help="The Magento server port.")
parser.add_argument("api_user", help="The API user to log in as.")
parser.add_argument("api_key", help="The API key to log in with.")
parser.add_argument("-p", "--path", help="The URL path to your instance's XML-RPC API.")
parser.add_argument("-v", "--verbose", action="store_true",
help="Set the XML-RPC client to verbose.")
args = parser.parse_args()
endpoint = {
"host": args.host,
"port": args.port,
"api_user": args.api_user,
"api_key": args.api_key,
"path": args.path,
"verbose": args.verbose
}
path = args.path if args.path else MagentoAPI.PATH
url = "http://%s:%d" % (args.host, args.port) + path
print
print
print "-- magento-ipython-shell -----------------"
print "Connecting to '%s'" % url
print "Using API user/key %s/%s" % (args.api_user, args.api_key)
magento = MagentoAPI(**endpoint)
print "Connected! The 'magento' variable is bound to a usable MagentoAPI instance."
print "-- magento-ipython-shell -----------------"
print
print
embed() # Shell time!
# Script entry point: python magento_ipython_shell.py HOST PORT USER KEY
if __name__ == "__main__":
    main()
|
fhamborg/news-please | refs/heads/master | newsplease/helper_classes/class_loader.py | 1 | import importlib
class ClassLoader:
    """Resolve a dotted path like ``package.module.ClassName`` to the class."""

    @classmethod
    def from_string(cls, class_name):
        """Import and return the class named by the dotted path *class_name*.

        Raises ImportError if the path contains no dot, the module cannot be
        imported, or the module does not define the named class.
        """
        if "." not in class_name:
            raise ImportError("{0} doesn't look like a module path".format(class_name))
        module_name, _, cls_name = class_name.rpartition(".")
        try:
            loaded_module = importlib.import_module(module_name)
            loaded_class = getattr(loaded_module, cls_name)
        # BUG FIX: the original clause was "except AttributeError and
        # ModuleNotFoundError", which evaluates to ModuleNotFoundError only,
        # so a missing class name (AttributeError from getattr) escaped
        # unwrapped. Catch both; ImportError also covers ModuleNotFoundError.
        except (ImportError, AttributeError) as e:
            raise ImportError("Module {0} does not exist or does not define a class named {1}".format(module_name,
                                                                                                     cls_name)) from e
        return loaded_class
|
ychfan/tensorflow | refs/heads/master | tensorflow/examples/tutorials/word2vec/word2vec_basic.py | 13 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic word2vec example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import random
from tempfile import gettempdir
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Step 1: Download the data.
# Base URL for Matt Mahoney's text-compression datasets (hosts text8.zip).
url = 'http://mattmahoney.net/dc/'
# pylint: disable=redefined-outer-name
def maybe_download(filename, expected_bytes):
  """Download a file if not present, and make sure it's the right size.

  The file is fetched from the module-level `url` into the system temp
  directory; its on-disk size must equal `expected_bytes` exactly.
  """
  destination = os.path.join(gettempdir(), filename)
  if not os.path.exists(destination):
    destination, _ = urllib.request.urlretrieve(url + filename, destination)
  actual_size = os.stat(destination).st_size
  if actual_size != expected_bytes:
    print(actual_size)
    raise Exception('Failed to verify ' + destination +
                    '. Can you get to it with a browser?')
  print('Found and verified', filename)
  return destination
# Fetch (or reuse) the ~31 MB text8 corpus archive; size pins the exact file.
filename = maybe_download('text8.zip', 31344016)
# Read the data into a list of strings.
def read_data(filename):
  """Extract the first file enclosed in a zip file as a list of words."""
  with zipfile.ZipFile(filename) as archive:
    first_member = archive.namelist()[0]
    raw = archive.read(first_member)
  return tf.compat.as_str(raw).split()
vocabulary = read_data(filename)
print('Data size', len(vocabulary))
# Step 2: Build the dictionary and replace rare words with UNK token.
# Only the 50,000 most frequent words get their own integer id.
vocabulary_size = 50000
def build_dataset(words, n_words):
  """Process raw inputs into a dataset.

  Keeps the (n_words - 1) most frequent words; every other word maps to the
  special 'UNK' (unknown) token, which always has code 0.

  Returns:
    data: the input words re-encoded as integer codes.
    count: [word, frequency] pairs; UNK's count is filled in at the end.
    dictionary: word -> code.
    reversed_dictionary: code -> word.
  """
  count = [['UNK', -1]]
  count.extend(collections.Counter(words).most_common(n_words - 1))
  dictionary = {word: code for code, (word, _) in enumerate(count)}
  data = []
  unk_count = 0
  for word in words:
    code = dictionary.get(word, 0)
    if code == 0:  # dictionary['UNK']
      unk_count += 1
    data.append(code)
  count[0][1] = unk_count
  reversed_dictionary = {code: word for word, code in dictionary.items()}
  return data, count, dictionary, reversed_dictionary
# Filling 4 global variables:
# data - list of codes (integers from 0 to vocabulary_size-1).
# This is the original text but words are replaced by their codes
# count - map of words(strings) to count of occurrences
# dictionary - map of words(strings) to their codes(integers)
# reverse_dictionary - maps codes(integers) to words(strings)
data, count, dictionary, reverse_dictionary = build_dataset(vocabulary,
                                                            vocabulary_size)
del vocabulary  # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
# Module-level cursor into `data`; generate_batch() advances it between calls.
data_index = 0
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
  """Build one skip-gram training batch from the global `data` stream.

  Args:
    batch_size: number of (center, context) examples; multiple of num_skips.
    num_skips: how many context words to sample per center word.
    skip_window: how many words to consider left and right of the center.

  Returns:
    (batch, labels): batch[i] is a center word code and labels[i, 0] is one
    of its sampled context words. Advances the module-level `data_index`
    cursor so successive calls walk through the corpus.
  """
  global data_index
  assert batch_size % num_skips == 0
  assert num_skips <= 2 * skip_window
  batch = np.ndarray(shape=(batch_size), dtype=np.int32)
  labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
  span = 2 * skip_window + 1  # [ skip_window target skip_window ]
  buffer = collections.deque(maxlen=span)
  if data_index + span > len(data):
    data_index = 0
  buffer.extend(data[data_index:data_index + span])
  data_index += span
  for i in range(batch_size // num_skips):
    context_words = [w for w in range(span) if w != skip_window]
    words_to_use = random.sample(context_words, num_skips)
    for j, context_word in enumerate(words_to_use):
      batch[i * num_skips + j] = buffer[skip_window]
      labels[i * num_skips + j, 0] = buffer[context_word]
    if data_index == len(data):
      # BUG FIX: the original used `buffer[:] = data[:span]`, but
      # collections.deque does not support slice assignment, so hitting
      # this corpus wrap-around branch raised TypeError. Refill explicitly.
      buffer.clear()
      buffer.extend(data[:span])
      data_index = span
    else:
      buffer.append(data[data_index])
      data_index += 1
  # Backtrack a little bit to avoid skipping words in the end of a batch
  data_index = (data_index + len(data) - span) % len(data)
  return batch, labels
# Smoke-test the batching: print 8 (center -> context) pairs as words.
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
  print(batch[i], reverse_dictionary[batch[i]],
        '->', labels[i, 0], reverse_dictionary[labels[i, 0]])
# Step 4: Build and train a skip-gram model.
batch_size = 128
embedding_size = 128  # Dimension of the embedding vector.
skip_window = 1  # How many words to consider left and right.
num_skips = 2  # How many times to reuse an input to generate a label.
num_sampled = 64  # Number of negative examples to sample.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent. These 3 variables are used only for
# displaying model accuracy, they don't affect calculation.
valid_size = 16  # Random set of words to evaluate similarity on.
valid_window = 100  # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
graph = tf.Graph()
with graph.as_default():
  # Input data.
  train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
  train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
  valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
  # Ops and variables pinned to the CPU because of missing GPU implementation
  with tf.device('/cpu:0'):
    # Look up embeddings for inputs.
    embeddings = tf.Variable(
        tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
    embed = tf.nn.embedding_lookup(embeddings, train_inputs)
    # Construct the variables for the NCE loss
    nce_weights = tf.Variable(
        tf.truncated_normal([vocabulary_size, embedding_size],
                            stddev=1.0 / math.sqrt(embedding_size)))
    nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
  # Compute the average NCE loss for the batch.
  # tf.nce_loss automatically draws a new sample of the negative labels each
  # time we evaluate the loss.
  # Explanation of the meaning of NCE loss:
  # http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/
  loss = tf.reduce_mean(
      tf.nn.nce_loss(weights=nce_weights,
                     biases=nce_biases,
                     labels=train_labels,
                     inputs=embed,
                     num_sampled=num_sampled,
                     num_classes=vocabulary_size))
  # Construct the SGD optimizer using a learning rate of 1.0.
  optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
  # Compute the cosine similarity between minibatch examples and all embeddings.
  norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
  normalized_embeddings = embeddings / norm
  valid_embeddings = tf.nn.embedding_lookup(
      normalized_embeddings, valid_dataset)
  # Rows of `similarity`: cosine similarity of each validation word against
  # the whole (normalized) vocabulary; used only for progress reporting.
  similarity = tf.matmul(
      valid_embeddings, normalized_embeddings, transpose_b=True)
  # Add variable initializer.
  init = tf.global_variables_initializer()
# Step 5: Begin training.
num_steps = 100001
with tf.Session(graph=graph) as session:
  # We must initialize all variables before we use them.
  init.run()
  print('Initialized')
  average_loss = 0
  for step in xrange(num_steps):
    batch_inputs, batch_labels = generate_batch(
        batch_size, num_skips, skip_window)
    feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
    # We perform one update step by evaluating the optimizer op (including it
    # in the list of returned values for session.run()
    _, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
    average_loss += loss_val
    if step % 2000 == 0:
      if step > 0:
        average_loss /= 2000
      # The average loss is an estimate of the loss over the last 2000 batches.
      print('Average loss at step ', step, ': ', average_loss)
      average_loss = 0
    # Note that this is expensive (~20% slowdown if computed every 500 steps)
    if step % 10000 == 0:
      sim = similarity.eval()
      for i in xrange(valid_size):
        valid_word = reverse_dictionary[valid_examples[i]]
        top_k = 8  # number of nearest neighbors
        nearest = (-sim[i, :]).argsort()[1:top_k + 1]
        log_str = 'Nearest to %s:' % valid_word
        for k in xrange(top_k):
          close_word = reverse_dictionary[nearest[k]]
          log_str = '%s %s,' % (log_str, close_word)
        print(log_str)
  # Keep the final normalized embeddings for the visualization step below.
  final_embeddings = normalized_embeddings.eval()
# Step 6: Visualize the embeddings.
# pylint: disable=missing-docstring
# Function to draw visualization of distance between embeddings.
def plot_with_labels(low_dim_embs, labels, filename):
  """Scatter-plot 2-D embeddings and annotate each point with its label.

  Args:
    low_dim_embs: array of shape (num_points, 2) with one row per label.
    labels: sequence of strings; must not be longer than low_dim_embs.
    filename: path the figure is saved to.
  """
  assert low_dim_embs.shape[0] >= len(labels), 'More labels than embeddings'
  plt.figure(figsize=(18, 18))  # in inches
  for idx, word in enumerate(labels):
    x_coord, y_coord = low_dim_embs[idx, :]
    plt.scatter(x_coord, y_coord)
    # Nudge the text a few points away from the marker so it stays readable.
    plt.annotate(
        word,
        xy=(x_coord, y_coord),
        xytext=(5, 2),
        textcoords='offset points',
        ha='right',
        va='bottom')
  plt.savefig(filename)
try:
  # pylint: disable=g-import-not-at-top
  from sklearn.manifold import TSNE
  import matplotlib.pyplot as plt
  # Project the learned embeddings down to 2-D with t-SNE and plot the
  # first `plot_only` words; the image goes into the system temp directory.
  tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000, method='exact')
  plot_only = 500
  low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
  labels = [reverse_dictionary[i] for i in xrange(plot_only)]
  plot_with_labels(low_dim_embs, labels, os.path.join(gettempdir(), 'tsne.png'))
except ImportError as ex:
  # Visualization is optional: skip gracefully if the plotting stack is absent.
  print('Please install sklearn, matplotlib, and scipy to show embeddings.')
  print(ex)
|
RAtechntukan/CouchPotatoServer | refs/heads/develop | libs/subliminal/async.py | 106 | # -*- coding: utf-8 -*-
# Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com>
#
# This file is part of subliminal.
#
# subliminal is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# subliminal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
from .core import (consume_task, LANGUAGE_INDEX, SERVICE_INDEX,
SERVICE_CONFIDENCE, MATCHING_CONFIDENCE, SERVICES, create_list_tasks,
create_download_tasks, group_by_video, key_subtitles)
from .language import language_list, language_set, LANGUAGES
from .tasks import StopTask
import Queue
import logging
import threading
__all__ = ['Worker', 'Pool']
logger = logging.getLogger(__name__)
class Worker(threading.Thread):
    """Consume tasks and put the result in the queue.

    Runs until it pulls a :class:`StopTask` (poison pill) from the task
    queue; every other task is handed to ``consume_task`` and the
    ``(video, result)`` pair is pushed onto the results queue.
    """
    def __init__(self, tasks, results):
        super(Worker, self).__init__()
        self.tasks = tasks      # shared input Queue of tasks
        self.results = results  # shared output Queue of (video, result)
        self.services = {}      # per-thread service instances, filled lazily by consume_task
    def run(self):
        while 1:
            result = []
            try:
                task = self.tasks.get(block=True)
                # StopTask is the shutdown sentinel put by Pool.stop()
                if isinstance(task, StopTask):
                    break
                result = consume_task(task, self.services)
                self.results.put((task.video, result))
            except:
                # Broad catch on purpose: a failing task must not kill the worker thread
                logger.error(u'Exception raised in worker %s' % self.name, exc_info=True)
            finally:
                # task_done() runs for every get(), including the StopTask
                # (finally runs on break too), so Pool.join() can unblock.
                self.tasks.task_done()
        self.terminate()
        logger.debug(u'Thread %s terminated' % self.name)
    def terminate(self):
        """Terminate instantiated services"""
        for service_name, service in self.services.iteritems():
            try:
                service.terminate()
            except:
                logger.error(u'Exception raised when terminating service %s' % service_name, exc_info=True)
class Pool(object):
    """Pool of workers.

    Owns a task queue and a results queue shared by ``size`` Worker
    threads. Usable as a context manager: workers start on entry and are
    stopped and drained on exit.
    """
    def __init__(self, size):
        # size: number of worker threads to spawn
        self.tasks = Queue.Queue()
        self.results = Queue.Queue()
        self.workers = []
        for _ in range(size):
            self.workers.append(Worker(self.tasks, self.results))
    def __enter__(self):
        self.start()
        return self
    def __exit__(self, *args):
        # Send poison pills and wait for all queued work to finish
        self.stop()
        self.join()
    def start(self):
        """Start workers"""
        for worker in self.workers:
            worker.start()
    def stop(self):
        """Stop workers"""
        # One StopTask per worker so each thread sees exactly one pill
        for _ in self.workers:
            self.tasks.put(StopTask())
    def join(self):
        """Join the task queue"""
        self.tasks.join()
    def collect(self):
        """Collect available results
        :return: results of tasks
        :rtype: list of :class:`~subliminal.tasks.Task`
        """
        results = []
        while 1:
            try:
                # Non-blocking: drain only what is ready right now
                result = self.results.get(block=False)
                results.append(result)
            except Queue.Empty:
                break
        return results
    def list_subtitles(self, paths, languages=None, services=None, force=True, multi=False, cache_dir=None, max_depth=3, scan_filter=None):
        """See :meth:`subliminal.list_subtitles`"""
        services = services or SERVICES
        languages = language_set(languages) if languages is not None else language_set(LANGUAGES)
        if isinstance(paths, basestring):
            paths = [paths]
        if any([not isinstance(p, unicode) for p in paths]):
            logger.warning(u'Not all entries are unicode')
        tasks = create_list_tasks(paths, languages, services, force, multi, cache_dir, max_depth, scan_filter)
        for task in tasks:
            self.tasks.put(task)
        # Block until every queued list task has been consumed, then gather
        self.join()
        results = self.collect()
        return group_by_video(results)
    def download_subtitles(self, paths, languages=None, services=None, force=True, multi=False, cache_dir=None, max_depth=3, scan_filter=None, order=None):
        """See :meth:`subliminal.download_subtitles`"""
        services = services or SERVICES
        # An ordered list here (vs the set above): language priority matters for sorting
        languages = language_list(languages) if languages is not None else language_list(LANGUAGES)
        if isinstance(paths, basestring):
            paths = [paths]
        order = order or [LANGUAGE_INDEX, SERVICE_INDEX, SERVICE_CONFIDENCE, MATCHING_CONFIDENCE]
        subtitles_by_video = self.list_subtitles(paths, languages, services, force, multi, cache_dir, max_depth, scan_filter)
        for video, subtitles in subtitles_by_video.iteritems():
            # Best candidate first according to the requested criteria order
            subtitles.sort(key=lambda s: key_subtitles(s, video, languages, services, order), reverse=True)
        tasks = create_download_tasks(subtitles_by_video, languages, multi)
        for task in tasks:
            self.tasks.put(task)
        self.join()
        results = self.collect()
        return group_by_video(results)
|
hgl888/chromium-crosswalk-efl | refs/heads/efl/crosswalk-10/39.0.2171.19 | build/copy_test_data_ios.py | 206 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Copies test data files or directories into a given output directory."""
import optparse
import os
import shutil
import sys
class WrongNumberOfArgumentsException(Exception):
  """Raised when the command line is missing required arguments."""
def EscapePath(path):
  """Returns |path| with every space character prefixed by a backslash."""
  return '\\ '.join(path.split(' '))
def ListFilesForPath(path):
  """Returns every file found under |path|.

  Revision-control metadata entries (anything whose basename starts with
  '.git' or '.svn') are skipped; plain files are returned as themselves;
  directories are expanded recursively.
  """
  basename = os.path.basename(path)
  if basename.startswith('.git') or basename.startswith('.svn'):
    return []
  # A plain file is its own one-element listing.
  if not os.path.isdir(path):
    return [path]
  # Flatten the per-entry listings of a directory.
  files = []
  for entry in os.listdir(path):
    files.extend(ListFilesForPath(os.path.join(path, entry)))
  return files
def CalcInputs(inputs):
  """Computes the full list of input files for a set of command-line arguments.

  Args:
    inputs: list of paths; directory entries are expanded recursively.

  Returns:
    A flat list of file paths.
  """
  # |inputs| is a list of paths, which may be directories.
  output = []
  # Renamed the loop variable from 'input' to avoid shadowing the builtin.
  for input_path in inputs:
    output.extend(ListFilesForPath(input_path))
  return output
def CopyFiles(relative_filenames, output_basedir):
  """Copies files to the given output directory.

  Args:
    relative_filenames: file paths relative to the current directory.
    output_basedir: destination directory; subdirectories are created as
        needed so each file keeps its relative path.
  """
  # Loop variable renamed from 'file' to avoid shadowing the builtin.
  for filename in relative_filenames:
    relative_dirname = os.path.dirname(filename)
    output_dir = os.path.join(output_basedir, relative_dirname)
    output_filename = os.path.join(output_basedir, filename)

    # In cases where a directory has turned into a file or vice versa, delete it
    # before copying it below.
    if os.path.exists(output_dir) and not os.path.isdir(output_dir):
      os.remove(output_dir)
    if os.path.exists(output_filename) and os.path.isdir(output_filename):
      shutil.rmtree(output_filename)

    if not os.path.exists(output_dir):
      os.makedirs(output_dir)
    shutil.copy(filename, output_filename)
def DoMain(argv):
  """Parses arguments and performs the copy, or lists inputs/outputs.

  Args:
    argv: command-line arguments, excluding the program name.

  Returns:
    A newline-joined string for --inputs/--outputs; otherwise None after
    copying the files.

  Raises:
    WrongNumberOfArgumentsException: if input files or -o are missing.
  """
  parser = optparse.OptionParser()
  usage = 'Usage: %prog -o <output_dir> [--inputs] [--outputs] <input_files>'
  parser.set_usage(usage)
  parser.add_option('-o', dest='output_dir')
  parser.add_option('--inputs', action='store_true', dest='list_inputs')
  parser.add_option('--outputs', action='store_true', dest='list_outputs')
  options, arglist = parser.parse_args(argv)

  if len(arglist) == 0:
    raise WrongNumberOfArgumentsException('<input_files> required.')

  # Walk the filesystem once and derive the escaped names from the same
  # list (the original code called CalcInputs twice, walking twice).
  files_to_copy = CalcInputs(arglist)
  escaped_files = [EscapePath(x) for x in files_to_copy]
  if options.list_inputs:
    return '\n'.join(escaped_files)

  if not options.output_dir:
    raise WrongNumberOfArgumentsException('-o required.')

  if options.list_outputs:
    outputs = [os.path.join(options.output_dir, x) for x in escaped_files]
    return '\n'.join(outputs)

  CopyFiles(files_to_copy, options.output_dir)
  return
def main(argv):
  # Entry point: run DoMain over argv (sans program name), mapping usage
  # errors to exit status 1 and printing them on stderr.
  # NOTE: Python 2 syntax ('except ..., e' and print statements); this file
  # predates Python 3 support.
  try:
    result = DoMain(argv[1:])
  except WrongNumberOfArgumentsException, e:
    print >>sys.stderr, e
    return 1
  if result:
    print result
  return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
rlworkgroup/metaworld | refs/heads/master | metaworld/policies/sawyer_plate_slide_v2_policy.py | 1 | import numpy as np
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
class SawyerPlateSlideV2Policy(Policy):
    """Scripted policy for the plate-slide task: hover over the puck,
    descend to it, then push it toward the goal shelf."""

    @staticmethod
    @assert_fully_parsed
    def _parse_obs(obs):
        # Split the flat observation into named pieces. Layout inferred from
        # the indices below: hand xyz, one unused scalar, puck xyz, padding,
        # shelf x, two trailing unused values — TODO confirm against the
        # environment's observation spec.
        return {
            'hand_pos': obs[:3],
            'unused_1': obs[3],
            'puck_pos': obs[4:7],
            'unused_2': obs[7:-3],
            'shelf_x': obs[-3],
            'unused_3': obs[-2:],
        }

    def get_action(self, obs):
        o_d = self._parse_obs(obs)

        action = Action({
            'delta_pos': np.arange(3),
            'grab_effort': 3
        })

        # Proportional move toward the staged target; gripper stays open
        # (grab_effort = -1) since the plate is pushed, not grasped.
        action['delta_pos'] = move(o_d['hand_pos'], to_xyz=self._desired_pos(o_d), p=10.)
        action['grab_effort'] = -1.

        return action.array

    @staticmethod
    def _desired_pos(o_d):
        pos_curr = o_d['hand_pos']
        # Offset targets a contact point slightly behind and above the puck.
        pos_puck = o_d['puck_pos'] + np.array([.0, -.055, .03])

        # Within 3 cm in the xy plane counts as aligned.
        aligned_with_puck = np.linalg.norm(pos_curr[:2] - pos_puck[:2]) <= 0.03

        if not aligned_with_puck:
            # Stage 1: hover 10 cm above the puck until horizontally aligned.
            return pos_puck + np.array([.0, .0, .1])
        elif abs(pos_curr[2] - pos_puck[2]) > 0.04:
            # Stage 2: descend to contact height.
            return pos_puck
        else:
            # Stage 3: slide toward the shelf x position at fixed y = 0.9.
            return np.array([o_d['shelf_x'], .9, pos_puck[2]])
|
icelic/eks | refs/heads/master | py/django_eks/migrations/0008_auto_20170605_1524.py | 1 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration. Removes CourseSimulation's own
    # columns and re-parents it onto Course via a one-to-one parent link,
    # and adds a self-referential prerequisite link on Course.
    # NOTE: generated code — operation order matters; do not reorder.

    dependencies = [
        ('django_eks', '0007_auto_20170529_0031'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='coursesimulation',
            name='author',
        ),
        migrations.RemoveField(
            model_name='coursesimulation',
            name='id',
        ),
        migrations.RemoveField(
            model_name='coursesimulation',
            name='name',
        ),
        migrations.AddField(
            model_name='course',
            name='prerequisite_for',
            field=models.ForeignKey(to='django_eks.Course', null=True),
        ),
        # default=2 backfills existing rows with the Course of pk 2;
        # preserve_default=False drops the default afterwards.
        migrations.AddField(
            model_name='coursesimulation',
            name='course_ptr',
            field=models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, default=2, serialize=False, to='django_eks.Course'),
            preserve_default=False,
        ),
    ]
|
OpenFacetracker/facetracker-core | refs/heads/master | lib/youtube-dl/youtube_dl/extractor/bild.py | 31 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import int_or_none
class BildIE(InfoExtractor):
    # Extractor for video clips on Bild.de. Metadata comes from an XML view
    # of the same resource, derived from the page URL.
    _VALID_URL = r'https?://(?:www\.)?bild\.de/(?:[^/]+/)+(?P<display_id>[^/]+)-(?P<id>\d+)(?:,auto=true)?\.bild\.html'
    IE_DESC = 'Bild.de'
    _TEST = {
        'url': 'http://www.bild.de/video/clip/apple-ipad-air/das-koennen-die-neuen-ipads-38184146.bild.html',
        'md5': 'dd495cbd99f2413502a1713a1156ac8a',
        'info_dict': {
            'id': '38184146',
            'ext': 'mp4',
            'title': 'BILD hat sie getestet',
            'thumbnail': 'http://bilder.bild.de/fotos/stand-das-koennen-die-neuen-ipads-38184138/Bild/1.bild.jpg',
            'duration': 196,
            'description': 'Mit dem iPad Air 2 und dem iPad Mini 3 hat Apple zwei neue Tablet-Modelle präsentiert. BILD-Reporter Sven Stein durfte die Geräte bereits testen. ',
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # Replace the HTML suffix with the XML-view suffix of the same page.
        xml_url = url.split(".bild.html")[0] + ",view=xml.bild.xml"
        doc = self._download_xml(xml_url, video_id)

        # scale=1000: source attribute is presumably in milliseconds —
        # confirm against utils.int_or_none semantics.
        duration = int_or_none(doc.attrib.get('duration'), scale=1000)

        return {
            'id': video_id,
            'title': doc.attrib['ueberschrift'],
            'description': doc.attrib.get('text'),
            'url': doc.attrib['src'],
            'thumbnail': doc.attrib.get('img'),
            'duration': duration,
        }
|
marcore/edx-platform | refs/heads/master | common/djangoapps/dark_lang/middleware.py | 46 | """
Middleware for dark-launching languages. These languages won't be used
when determining which translation to give a user based on their browser
header, but can be selected by setting the ``preview-lang`` query parameter
to the language code.
Adding the query parameter ``clear-lang`` will reset the language stored
in the user's session.
This middleware must be placed before the LocaleMiddleware, but after
the SessionMiddleware.
"""
from django.conf import settings
from dark_lang import DARK_LANGUAGE_KEY
from dark_lang.models import DarkLangConfig
from openedx.core.djangoapps.user_api.preferences.api import (
delete_user_preference, get_user_preference, set_user_preference
)
from lang_pref import LANGUAGE_KEY
from django.utils.translation.trans_real import parse_accept_lang_header
from django.utils.translation import LANGUAGE_SESSION_KEY
def dark_parse_accept_lang_header(accept):
    """Parse an Accept-Language header, mapping new-style Chinese language
    codes (e.g. 'zh-hans') onto the old-style codes ('zh-CN') that this
    Django version's translations are keyed on.

    The codes 'zh-cn'/'zh-tw' are deprecated upstream (see
    https://code.djangoproject.com/ticket/18419) and django >= 1.7 uses
    'zh-hans'/'zh-hant' instead, but most browsers still send the old
    codes; this shim keeps both working. Once edX moves to django 1.7+,
    the mapping can be flipped to translate old codes to new ones.

    Returns a list of (language, priority) tuples, like
    django's parse_accept_lang_header.
    """
    return [
        (CHINESE_LANGUAGE_CODE_MAP.get(lang.lower(), lang), priority)
        for lang, priority in parse_accept_lang_header(accept)
    ]
# Maps browser-sent Chinese language codes (both new-style 'zh-han*' and
# old-style regional variants) onto the old-style codes this codebase's
# translations use.
# If django 1.7 or higher is used, the right-side can be updated with new-style codes.
CHINESE_LANGUAGE_CODE_MAP = {
    # The following are the new-style language codes for chinese language
    'zh-hans': 'zh-CN',  # Chinese (Simplified),
    'zh-hans-cn': 'zh-CN',  # Chinese (Simplified, China)
    'zh-hans-sg': 'zh-CN',  # Chinese (Simplified, Singapore)
    'zh-hant': 'zh-TW',  # Chinese (Traditional)
    'zh-hant-hk': 'zh-HK',  # Chinese (Traditional, Hongkong)
    'zh-hant-mo': 'zh-TW',  # Chinese (Traditional, Macau)
    'zh-hant-tw': 'zh-TW',  # Chinese (Traditional, Taiwan)
    # The following are the old-style language codes that django does not recognize
    'zh-mo': 'zh-TW',  # Chinese (Traditional, Macau)
    'zh-sg': 'zh-CN',  # Chinese (Simplified, Singapore)
}
class DarkLangMiddleware(object):
    """
    Middleware for dark-launching languages.

    This is configured by creating ``DarkLangConfig`` rows in the database,
    using the django admin site.
    """
    @property
    def released_langs(self):
        """
        Current list of released languages.

        The platform default (``settings.LANGUAGE_CODE``) is always
        treated as released.
        """
        language_options = DarkLangConfig.current().released_languages_list
        if settings.LANGUAGE_CODE not in language_options:
            language_options.append(settings.LANGUAGE_CODE)
        return language_options

    def process_request(self, request):
        """
        Prevent user from requesting un-released languages except by using the preview-lang query string.
        """
        # No-op unless dark-lang is enabled in config.
        if not DarkLangConfig.current().enabled:
            return

        self._clean_accept_headers(request)
        self._activate_preview_language(request)

    def _fuzzy_match(self, lang_code):
        """Returns a fuzzy match for lang_code"""
        # Exact match wins; otherwise match on the language prefix
        # (e.g. 'es-mx' matches a released 'es-419' via prefix 'es').
        if lang_code in self.released_langs:
            return lang_code

        lang_prefix = lang_code.partition('-')[0]
        for released_lang in self.released_langs:
            released_prefix = released_lang.partition('-')[0]
            if lang_prefix == released_prefix:
                return released_lang
        return None

    def _format_accept_value(self, lang, priority=1.0):
        """
        Formats lang and priority into a valid accept header fragment.
        """
        return "{};q={}".format(lang, priority)

    def _clean_accept_headers(self, request):
        """
        Remove any language that is not either in ``self.released_langs`` or
        a territory of one of those languages.
        """
        accept = request.META.get('HTTP_ACCEPT_LANGUAGE', None)
        if accept is None or accept == '*':
            return

        # Rebuild the header keeping only (fuzzy-)released languages,
        # preserving each entry's original quality value.
        new_accept = []
        for lang, priority in dark_parse_accept_lang_header(accept):
            fuzzy_code = self._fuzzy_match(lang.lower())
            if fuzzy_code:
                new_accept.append(self._format_accept_value(fuzzy_code, priority))

        new_accept = ", ".join(new_accept)

        request.META['HTTP_ACCEPT_LANGUAGE'] = new_accept

    def _activate_preview_language(self, request):
        """
        If the request has the get parameter ``preview-lang``,
        and that language doesn't appear in ``self.released_langs``,
        then set the session LANGUAGE_SESSION_KEY to that language.
        """
        auth_user = request.user.is_authenticated()
        if 'clear-lang' in request.GET:
            # delete the session language key (if one is set)
            if LANGUAGE_SESSION_KEY in request.session:
                del request.session[LANGUAGE_SESSION_KEY]

            if auth_user:
                # Reset user's dark lang preference to null
                delete_user_preference(request.user, DARK_LANGUAGE_KEY)
                # Get & set user's preferred language
                user_pref = get_user_preference(request.user, LANGUAGE_KEY)
                if user_pref:
                    request.session[LANGUAGE_SESSION_KEY] = user_pref
            return

        # Get the user's preview lang - this is either going to be set from a query
        # param `?preview-lang=xx`, or we may have one already set as a dark lang preference.
        preview_lang = request.GET.get('preview-lang', None)
        if not preview_lang and auth_user:
            # Get the request user's dark lang preference
            preview_lang = get_user_preference(request.user, DARK_LANGUAGE_KEY)

        # User doesn't have a dark lang preference, so just return
        if not preview_lang:
            return

        # Set the session key to the requested preview lang
        request.session[LANGUAGE_SESSION_KEY] = preview_lang

        # Make sure that we set the requested preview lang as the dark lang preference for the
        # user, so that the lang_pref middleware doesn't clobber away the dark lang preview.
        if auth_user:
            set_user_preference(request.user, DARK_LANGUAGE_KEY, preview_lang)
|
RichDijk/eXe | refs/heads/master | nevow/test/test_i18n.py | 14 | from twisted.trial import unittest
from cStringIO import StringIO
from nevow import inevow, flat, context, tags, loaders, rend
from nevow import i18n
def mockTranslator(s, domain=None):
    """Fake translator: wraps *s* in a MOCK(...) marker that records any
    keyword arguments (currently just ``domain``) it was called with."""
    kwargs = []
    if domain is not None:
        kwargs.append('domain=%r' % (domain,))
    return 'MOCK(%s)[%s]' % (', '.join(kwargs), s)
class Misc(unittest.TestCase):
    """Basic flattening of translated strings, with the default and a
    custom translator."""
    def test_simple(self):
        # Only checks that constructing a translated string does not raise.
        s = i18n._('foo')
    def test_simple_flat(self):
        s = i18n._('foo')
        r = flat.ten.flatten(s, None)
        self.assertEquals(r, 'foo')
    def test_translator(self):
        _ = i18n.Translator(translator=mockTranslator)
        s = _('foo')
        r = flat.ten.flatten(s, None)
        self.assertEquals(r, 'MOCK()[foo]')
class Config(unittest.TestCase):
    """An I18NConfig can be remembered on a context."""
    def test_remember(self):
        # Only checks that remembering the config does not raise.
        ctx = context.WebContext()
        cfg = i18n.I18NConfig(domain='foo')
        ctx.remember(cfg)
class Domain(unittest.TestCase):
    """Resolution order of the gettext domain: a call-time domain beats
    the Translator's class-init domain, which beats the domain remembered
    on the context."""
    def test_classInit(self):
        _ = i18n.Translator(translator=mockTranslator,
                            domain='bar')
        s = _('foo')
        r = flat.ten.flatten(s, None)
        self.assertEquals(r, "MOCK(domain='bar')[foo]")
    def test_runTime(self):
        _ = i18n.Translator(translator=mockTranslator)
        s = _('foo', domain='baz')
        r = flat.ten.flatten(s, None)
        self.assertEquals(r, "MOCK(domain='baz')[foo]")
    def test_context(self):
        _ = i18n.Translator(translator=mockTranslator)
        ctx = context.WebContext()
        cfg = i18n.I18NConfig(domain='thud')
        ctx.remember(cfg)
        s = _('foo')
        r = flat.ten.flatten(s, ctx)
        self.assertEquals(r, "MOCK(domain='thud')[foo]")
    def test_runTime_beats_all(self):
        _ = i18n.Translator(translator=mockTranslator,
                            domain='not-used1')
        ctx = context.WebContext()
        cfg = i18n.I18NConfig(domain='not-used2')
        ctx.remember(cfg)
        s = _('foo', domain='baz')
        r = flat.ten.flatten(s, None)
        self.assertEquals(r, "MOCK(domain='baz')[foo]")
    def test_classInit_beats_context(self):
        _ = i18n.Translator(translator=mockTranslator,
                            domain='baz')
        ctx = context.WebContext()
        cfg = i18n.I18NConfig(domain='not-used')
        ctx.remember(cfg)
        s = _('foo')
        r = flat.ten.flatten(s, None)
        self.assertEquals(r, "MOCK(domain='baz')[foo]")
class Format(unittest.TestCase):
    """The % operator on translated strings defers interpolation until
    flatten time, and can be applied repeatedly."""
    def test_simple(self):
        _ = i18n.Translator(translator=mockTranslator)
        s = _('foo %s') % 'bar'
        r = flat.ten.flatten(s, None)
        self.assertEquals(r, "MOCK()[foo bar]")
    def test_multiple(self):
        _ = i18n.Translator(translator=mockTranslator)
        s = _('foo %s')
        s = s % 'bar %s'
        s = s % 'baz'
        r = flat.ten.flatten(s, None)
        self.assertEquals(r, "MOCK()[foo bar baz]")
class FakeRequest(object):
    """Minimal IRequest stand-in that serves canned headers."""
    # Old-style (pre-zope.interface) interface declaration.
    __implements__ = inevow.IRequest,
    def __init__(self, headers):
        self.headers = headers  # mapping of header name -> value
    def getHeader(self, key):
        # Missing headers read as None, matching the real request API.
        return self.headers.get(key, None)
class Languages(unittest.TestCase):
    """Parsing of the Accept-Language header into an ordered language
    list via the ILanguages adapter, including q-value sorting and
    malformed quality parameters."""
    def test_noLanguages(self):
        request = FakeRequest(headers={})
        ctx = context.RequestContext(tag=request)
        r = inevow.ILanguages(ctx)
        self.assertEquals(r, [])
    def test_oneLanguage(self):
        request = FakeRequest(headers={
            'accept-language': 'fo',
            })
        ctx = context.RequestContext(tag=request)
        r = inevow.ILanguages(ctx)
        self.assertEquals(r, ['fo'])
    def test_multipleLanguages(self):
        request = FakeRequest(headers={
            'accept-language': 'fo,ba,th',
            })
        ctx = context.RequestContext(tag=request)
        r = inevow.ILanguages(ctx)
        self.assertEquals(r, ['fo', 'ba', 'th'])
    def test_quality_simple(self):
        request = FakeRequest(headers={
            'accept-language': 'fo;q=0.4',
            })
        ctx = context.RequestContext(tag=request)
        r = inevow.ILanguages(ctx)
        self.assertEquals(r, ['fo'])
    def test_quality_sort(self):
        # Highest q-value first.
        request = FakeRequest(headers={
            'accept-language': 'fo;q=0.4,ba;q=0.2,xy;q=0.9',
            })
        ctx = context.RequestContext(tag=request)
        r = inevow.ILanguages(ctx)
        self.assertEquals(r, ['xy', 'fo', 'ba'])
    def test_quality_invalid_notQ(self):
        request = FakeRequest(headers={
            'accept-language': 'fo;q=0.4,ba;z=0.2',
            })
        ctx = context.RequestContext(tag=request)
        r = inevow.ILanguages(ctx)
        self.assertEquals(r, ['ba', 'fo'])
    def test_quality_invalid_notFloat(self):
        request = FakeRequest(headers={
            'accept-language': 'fo;q=0.4,ba;q=junk',
            })
        ctx = context.RequestContext(tag=request)
        r = inevow.ILanguages(ctx)
        self.assertEquals(r, ['ba', 'fo'])
class Render(unittest.TestCase):
    """End-to-end rendering of the 'i18n' render directive through a
    rend.Page."""
    def makePage(self, content):
        # Build a single-tag page whose i18n directive wraps *content*,
        # flatten it and return the rendered string.
        _ = i18n.Translator(translator=mockTranslator)
        page = rend.Page(
            docFactory=loaders.stan(tags.invisible(render=tags.directive('i18n'))[content]))
        page.render_i18n = i18n.render(_)
        doc = page.docFactory.load()
        ctx = context.WovenContext(context.PageContext(tag=page),
                                   tags.invisible[doc])
        page.rememberStuff(ctx)
        io = StringIO()
        writer = io.write
        def finisher(result):
            return io.getvalue()
        d = page.flattenFactory(doc, ctx, writer, finisher)
        # deferredResult is a legacy Twisted trial API; blocks up to 1s.
        r = unittest.deferredResult(d, 1)
        return r
    def test_empty(self):
        r = self.makePage([''])
        self.assertEquals(r, 'MOCK()[]')
    def test_simple(self):
        r = self.makePage(['foo'])
        self.assertEquals(r, 'MOCK()[foo]')
    def test_stan(self):
        # Tags inside i18n content are flattened into the translatable text.
        r = self.makePage([tags.p['You should really avoid tags in i18n input.']])
        self.assertEquals(r, 'MOCK()[<p>You should really avoid tags in i18n input.</p>]')
class InterpolateTests:
    """Shared %-interpolation cases; mixed into concrete TestCases which
    supply check() (and mangle())."""
    def test_mod_string(self):
        self.check('foo %s', 'bar',
                   'foo bar')
    def test_mod_unicode(self):
        self.check('foo %s', u'bar',
                   'foo bar')
    # Tuples are a special case, 'foo %s' % ('bar', 'baz') does not
    # work. Also, 'foo %s %s' only works with tuples.
    def test_mod_tuple_two(self):
        self.check('foo %s %s', ('bar', 'baz'),
                   "foo bar baz")
    def test_mod_tuple_complex(self):
        self.check('foo %s %s %s', ([1, 2], (3, 4), {5: 6}),
                   "foo [1, 2] (3, 4) {5: 6}")
    def test_mod_list_stringify(self):
        self.check('foo %s', ['bar', 'baz'],
                   "foo ['bar', 'baz']")
    def test_mod_list_reprify(self):
        self.check('foo %r', ['bar', 'baz'],
                   "foo ['bar', 'baz']")
    # Dict formatting accepts either ordering since Python 2 dicts are
    # unordered; check() takes multiple acceptable results.
    def test_mod_dict_stringify(self):
        self.check('foo %s', {'bar': 1, 'baz': 2},
                   "foo {'bar': 1, 'baz': 2}",
                   "foo {'baz': 2, 'bar': 1}")
    def test_mod_dict_reprify(self):
        self.check('foo %r', {'bar': 1, 'baz': 2},
                   "foo {'bar': 1, 'baz': 2}",
                   "foo {'baz': 2, 'bar': 1}")
    def test_mod_dict_two(self):
        self.check('foo %(bar)s %(baz)s', {'bar': 1, 'baz': 2},
                   "foo 1 2")
class InterpolateMixin:
    """Provides setUp and the check() harness for InterpolateTests;
    subclasses define mangle() to convert the lazy translation."""
    def setUp(self):
        self._ = i18n.Translator(translator=mockTranslator)
    def mangle(self, s):
        # Python 2 raise syntax, kept as-is for this Python 2 codebase.
        raise NotImplementedError, 'override mangle somewhere'
    def check(self, fmt, args, *wants):
        # Any of the *wants values is accepted (dict ordering varies).
        got = self.mangle(self._(fmt) % args)
        self.failUnlessIn(got, wants)
class Repr(InterpolateMixin, unittest.TestCase, InterpolateTests):
    """repr() of an uninterpolated string shows the lazy PlaceHolder and
    its pending arguments, not the flattened text."""
    def mangle(self, s):
        return repr(s)
    def check(self, fmt, args, *wants):
        # Overrides the expected value: the *wants from InterpolateTests
        # are deliberately ignored in favour of the PlaceHolder repr.
        InterpolateMixin.check(self, fmt, args,
                               "PlaceHolder(translator=%r, original=%r) %% %r" % \
                               (mockTranslator, fmt, args))
class Str(InterpolateMixin, unittest.TestCase, InterpolateTests):
    """str() behaves like repr() for an uninterpolated PlaceHolder."""
    def mangle(self, s):
        return str(s)
    def check(self, fmt, args, *wants):
        # Same override as Repr: *wants from InterpolateTests is ignored.
        InterpolateMixin.check(self, fmt, args,
                               "PlaceHolder(translator=%r, original=%r) %% %r" % \
                               (mockTranslator, fmt, args))
class Interpolation(InterpolateMixin, unittest.TestCase, InterpolateTests):
    """Flattening performs the deferred %-interpolation and runs the
    result through the (mock) translator."""
    def mangle(self, s):
        r = flat.ten.flatten(s, None)
        return r
    def check(self, fmt, args, *wants):
        # Expected values are the InterpolateTests results wrapped in the
        # mock translator's MOCK()[...] marker.
        InterpolateMixin.check(self, fmt, args,
                               *['MOCK()[%s]' % x for x in wants])
|
Lekanich/intellij-community | refs/heads/master | python/testData/refactoring/move/packageImport/after/src/a.py | 45382 | |
home-assistant/home-assistant | refs/heads/dev | tests/components/facebox/test_image_processing.py | 8 | """The tests for the facebox component."""
from unittest.mock import Mock, mock_open, patch
import pytest
import requests
import requests_mock
import homeassistant.components.facebox.image_processing as fb
import homeassistant.components.image_processing as ip
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_NAME,
CONF_FRIENDLY_NAME,
CONF_IP_ADDRESS,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
HTTP_BAD_REQUEST,
HTTP_OK,
HTTP_UNAUTHORIZED,
STATE_UNKNOWN,
)
from homeassistant.core import callback
from homeassistant.setup import async_setup_component
MOCK_IP = "192.168.0.1"
MOCK_PORT = "8080"
# Mock data returned by the facebox API.
MOCK_BOX_ID = "b893cc4f7fd6"
MOCK_ERROR_NO_FACE = "No face found"
MOCK_FACE = {
"confidence": 0.5812028911604818,
"id": "john.jpg",
"matched": True,
"name": "John Lennon",
"rect": {"height": 75, "left": 63, "top": 262, "width": 74},
}
MOCK_FILE_PATH = "/images/mock.jpg"
MOCK_HEALTH = {
"success": True,
"hostname": "b893cc4f7fd6",
"metadata": {"boxname": "facebox", "build": "development"},
"errors": [],
}
MOCK_JSON = {"facesCount": 1, "success": True, "faces": [MOCK_FACE]}
MOCK_NAME = "mock_name"
MOCK_USERNAME = "mock_username"
MOCK_PASSWORD = "mock_password"
# Faces data after parsing.
PARSED_FACES = [
{
fb.FACEBOX_NAME: "John Lennon",
fb.ATTR_IMAGE_ID: "john.jpg",
fb.ATTR_CONFIDENCE: 58.12,
fb.ATTR_MATCHED: True,
fb.ATTR_BOUNDING_BOX: {"height": 75, "left": 63, "top": 262, "width": 74},
}
]
MATCHED_FACES = {"John Lennon": 58.12}
VALID_ENTITY_ID = "image_processing.facebox_demo_camera"
VALID_CONFIG = {
ip.DOMAIN: {
"platform": "facebox",
CONF_IP_ADDRESS: MOCK_IP,
CONF_PORT: MOCK_PORT,
ip.CONF_SOURCE: {ip.CONF_ENTITY_ID: "camera.demo_camera"},
},
"camera": {"platform": "demo"},
}
@pytest.fixture
def mock_healthybox():
    """Mock fb.check_box_health so setup succeeds without a real facebox server."""
    check_box_health = (
        "homeassistant.components.facebox.image_processing.check_box_health"
    )
    # Returning the box id makes the platform treat the box as healthy.
    with patch(check_box_health, return_value=MOCK_BOX_ID) as _mock_healthybox:
        yield _mock_healthybox
@pytest.fixture
def mock_isfile():
    """Mock os.path.isfile (as used by the component, cv.isfile)."""
    # Lets the teach service's file-path check pass for the fake path.
    with patch(
        "homeassistant.components.facebox.image_processing.cv.isfile", return_value=True
    ) as _mock_isfile:
        yield _mock_isfile
@pytest.fixture
def mock_image():
    """Return a mock camera image."""
    # The demo camera yields fixed bytes, so no real capture is attempted.
    with patch(
        "homeassistant.components.demo.camera.DemoCamera.camera_image",
        return_value=b"Test",
    ) as image:
        yield image
@pytest.fixture
def mock_open_file():
    """Mock open inside the facebox module."""
    # mock_open() supplies a file-like object for the teach-service upload;
    # create=True because 'open' is a builtin, not a module attribute.
    mopen = mock_open()
    with patch(
        "homeassistant.components.facebox.image_processing.open", mopen, create=True
    ) as _mock_open:
        yield _mock_open
def test_check_box_health(caplog):
    """Test check box health."""
    with requests_mock.Mocker() as mock_req:
        url = f"http://{MOCK_IP}:{MOCK_PORT}/healthz"
        # Healthy box: the hostname from the payload is returned as box id.
        mock_req.get(url, status_code=HTTP_OK, json=MOCK_HEALTH)
        assert fb.check_box_health(url, "user", "pass") == MOCK_BOX_ID

        # Bad credentials: None is returned and the error is logged.
        mock_req.get(url, status_code=HTTP_UNAUTHORIZED)
        assert fb.check_box_health(url, None, None) is None
        assert "AuthenticationError on facebox" in caplog.text

        # Unreachable box: connection errors are logged, not raised.
        mock_req.get(url, exc=requests.exceptions.ConnectTimeout)
        fb.check_box_health(url, None, None)
        assert "ConnectionError: Is facebox running?" in caplog.text
def test_encode_image():
    """Test that binary data is encoded correctly."""
    # "dGVzdA==" is the base64 encoding of b"test".
    assert fb.encode_image(b"test") == "dGVzdA=="
def test_get_matched_faces():
    """Test that matched_faces are parsed correctly."""
    # Only matched faces should appear, keyed by name with confidence value.
    assert fb.get_matched_faces(PARSED_FACES) == MATCHED_FACES
def test_parse_faces():
    """Test parsing of raw face data, and generation of matched_faces."""
    # Raw API confidence (0-1) becomes a rounded percentage in PARSED_FACES.
    assert fb.parse_faces(MOCK_JSON["faces"]) == PARSED_FACES
@patch("os.access", Mock(return_value=False))
def test_valid_file_path():
    """Test that an invalid file_path is caught."""
    # With os.access forced to False the path must be rejected.
    assert not fb.valid_file_path("test_path")
async def test_setup_platform(hass, mock_healthybox):
    """Set up platform with one entity."""
    await async_setup_component(hass, ip.DOMAIN, VALID_CONFIG)
    await hass.async_block_till_done()
    # The image_processing entity for the demo camera should now exist.
    assert hass.states.get(VALID_ENTITY_ID)
async def test_setup_platform_with_auth(hass, mock_healthybox):
    """Set up platform with one entity and auth."""
    # Same as test_setup_platform but with username/password configured.
    valid_config_auth = VALID_CONFIG.copy()
    valid_config_auth[ip.DOMAIN][CONF_USERNAME] = MOCK_USERNAME
    valid_config_auth[ip.DOMAIN][CONF_PASSWORD] = MOCK_PASSWORD

    await async_setup_component(hass, ip.DOMAIN, valid_config_auth)
    await hass.async_block_till_done()
    assert hass.states.get(VALID_ENTITY_ID)
async def test_process_image(hass, mock_healthybox, mock_image):
    """Test successful processing of an image."""
    await async_setup_component(hass, ip.DOMAIN, VALID_CONFIG)
    await hass.async_block_till_done()
    assert hass.states.get(VALID_ENTITY_ID)

    # Capture detect_face events fired during the scan.
    face_events = []

    @callback
    def mock_face_event(event):
        """Mock event."""
        face_events.append(event)

    hass.bus.async_listen("image_processing.detect_face", mock_face_event)

    with requests_mock.Mocker() as mock_req:
        url = f"http://{MOCK_IP}:{MOCK_PORT}/facebox/check"
        mock_req.post(url, json=MOCK_JSON)
        data = {ATTR_ENTITY_ID: VALID_ENTITY_ID}
        await hass.services.async_call(ip.DOMAIN, ip.SERVICE_SCAN, service_data=data)
        await hass.async_block_till_done()

    # Entity state is the number of detected faces, with parsed attributes.
    state = hass.states.get(VALID_ENTITY_ID)
    assert state.state == "1"
    assert state.attributes.get("matched_faces") == MATCHED_FACES
    assert state.attributes.get("total_matched_faces") == 1

    PARSED_FACES[0][ATTR_ENTITY_ID] = VALID_ENTITY_ID  # Update.
    assert state.attributes.get("faces") == PARSED_FACES
    assert state.attributes.get(CONF_FRIENDLY_NAME) == "facebox demo_camera"

    # One detect_face event per matched face, mirroring the parsed data.
    assert len(face_events) == 1
    assert face_events[0].data[ATTR_NAME] == PARSED_FACES[0][ATTR_NAME]
    assert (
        face_events[0].data[fb.ATTR_CONFIDENCE] == PARSED_FACES[0][fb.ATTR_CONFIDENCE]
    )
    assert face_events[0].data[ATTR_ENTITY_ID] == VALID_ENTITY_ID
    assert face_events[0].data[fb.ATTR_IMAGE_ID] == PARSED_FACES[0][fb.ATTR_IMAGE_ID]
    assert (
        face_events[0].data[fb.ATTR_BOUNDING_BOX]
        == PARSED_FACES[0][fb.ATTR_BOUNDING_BOX]
    )
async def test_process_image_errors(hass, mock_healthybox, mock_image, caplog):
    """Test process_image errors."""
    await async_setup_component(hass, ip.DOMAIN, VALID_CONFIG)
    await hass.async_block_till_done()
    assert hass.states.get(VALID_ENTITY_ID)

    # Test connection error: state stays unknown with empty face attributes.
    with requests_mock.Mocker() as mock_req:
        url = f"http://{MOCK_IP}:{MOCK_PORT}/facebox/check"
        mock_req.register_uri("POST", url, exc=requests.exceptions.ConnectTimeout)
        data = {ATTR_ENTITY_ID: VALID_ENTITY_ID}
        await hass.services.async_call(ip.DOMAIN, ip.SERVICE_SCAN, service_data=data)
        await hass.async_block_till_done()

    assert "ConnectionError: Is facebox running?" in caplog.text

    state = hass.states.get(VALID_ENTITY_ID)
    assert state.state == STATE_UNKNOWN
    assert state.attributes.get("faces") == []
    assert state.attributes.get("matched_faces") == {}

    # Now test with bad auth: the error is logged, not raised.
    with requests_mock.Mocker() as mock_req:
        url = f"http://{MOCK_IP}:{MOCK_PORT}/facebox/check"
        mock_req.register_uri("POST", url, status_code=HTTP_UNAUTHORIZED)
        data = {ATTR_ENTITY_ID: VALID_ENTITY_ID}
        await hass.services.async_call(ip.DOMAIN, ip.SERVICE_SCAN, service_data=data)
        await hass.async_block_till_done()
        assert "AuthenticationError on facebox" in caplog.text
async def _async_call_teach_face(hass):
    """Invoke the facebox teach service with the standard mock payload and wait."""
    data = {
        ATTR_ENTITY_ID: VALID_ENTITY_ID,
        ATTR_NAME: MOCK_NAME,
        fb.FILE_PATH: MOCK_FILE_PATH,
    }
    await hass.services.async_call(
        fb.DOMAIN, fb.SERVICE_TEACH_FACE, service_data=data
    )
    await hass.async_block_till_done()


async def test_teach_service(
    hass, mock_healthybox, mock_image, mock_isfile, mock_open_file, caplog
):
    """Test teaching of facebox.

    Exercises the teach service against four mocked server responses:
    success (200), bad auth (401), a 400 carrying an error payload, and
    a connection timeout.  The previously quadruplicated payload/call
    code is factored into ``_async_call_teach_face``.
    """
    await async_setup_component(hass, ip.DOMAIN, VALID_CONFIG)
    await hass.async_block_till_done()
    assert hass.states.get(VALID_ENTITY_ID)

    # Patch out 'is_allowed_path' as the mock files aren't allowed
    hass.config.is_allowed_path = Mock(return_value=True)

    url = f"http://{MOCK_IP}:{MOCK_PORT}/facebox/teach"

    # Test successful teach.
    with requests_mock.Mocker() as mock_req:
        mock_req.post(url, status_code=HTTP_OK)
        await _async_call_teach_face(hass)

    # Now test with bad auth.
    with requests_mock.Mocker() as mock_req:
        mock_req.post(url, status_code=HTTP_UNAUTHORIZED)
        await _async_call_teach_face(hass)
        assert "AuthenticationError on facebox" in caplog.text

    # Now test the failed teaching.
    with requests_mock.Mocker() as mock_req:
        mock_req.post(url, status_code=HTTP_BAD_REQUEST, text=MOCK_ERROR_NO_FACE)
        await _async_call_teach_face(hass)
        assert MOCK_ERROR_NO_FACE in caplog.text

    # Now test connection error.
    with requests_mock.Mocker() as mock_req:
        mock_req.post(url, exc=requests.exceptions.ConnectTimeout)
        await _async_call_teach_face(hass)
        assert "ConnectionError: Is facebox running?" in caplog.text
async def test_setup_platform_with_name(hass, mock_healthybox):
    """Set up platform with one entity and a name.

    Bug fix: the original used ``VALID_CONFIG.copy()`` — a *shallow*
    copy — and then mutated the nested source dict, which wrote
    ``MOCK_NAME`` into the shared ``VALID_CONFIG`` constant and leaked
    state into every later test.  A deep copy keeps the fixture pristine.
    """
    import copy

    named_entity_id = f"image_processing.{MOCK_NAME}"

    valid_config_named = copy.deepcopy(VALID_CONFIG)
    valid_config_named[ip.DOMAIN][ip.CONF_SOURCE][ip.CONF_NAME] = MOCK_NAME

    await async_setup_component(hass, ip.DOMAIN, valid_config_named)
    await hass.async_block_till_done()
    assert hass.states.get(named_entity_id)
    state = hass.states.get(named_entity_id)
    assert state.attributes.get(CONF_FRIENDLY_NAME) == MOCK_NAME
|
sonnyhu/scipy | refs/heads/master | scipy/weave/tests/test_slice_handler.py | 96 | from __future__ import absolute_import, print_function
import parser
from numpy.testing import TestCase, assert_equal, run_module_suite
from scipy.weave import slice_handler
from scipy.weave.slice_handler import indexed_array_pattern
from scipy.weave.ast_tools import ast_to_string, find_first_pattern
from weave_test_utils import remove_whitespace
class TestBuildSliceAtom(TestCase):
    """Checks translation of slice-variable dicts into ``slice(...)`` atoms."""

    def generic_check(self, slice_vars, desired):
        """Build a slice atom from *slice_vars* and compare its string form."""
        position = slice_vars['pos']
        atom_ast = slice_handler.build_slice_atom(slice_vars, position)
        rendered = ast_to_string(atom_ast)
        assert_equal(rendered, desired)

    def test_exclusive_end(self):
        # A literal end index is rewritten into the exclusive form "end - 1".
        slice_vars = {'begin': '1', 'end': '2', 'step': '_stp',
                      'single_index': '_index', 'pos': 0}
        self.generic_check(slice_vars, 'slice(1,2-1)')
class TestSlice(TestCase):
    """Verify ``slice_handler.slice_ast_to_dict`` for every slice spelling.

    Each test parses a tiny indexing expression, extracts the subscript
    node, and checks the begin/end/step/single_index values recovered
    from it.  Fields not present in the source keep the sentinel names
    ``_beg``, ``_end``, ``_stp`` and ``_index``.
    """

    def generic_check(self, suite_string, desired):
        # Parse the one-liner and locate the first indexed-array pattern.
        ast_tuple = parser.suite(suite_string).totuple()
        found, data = find_first_pattern(ast_tuple, indexed_array_pattern)
        subscript = data['subscript_list'][1]  # [0] is symbol, [1] is the subscript
        actual = slice_handler.slice_ast_to_dict(subscript)
        assert_equal(actual, desired, suite_string)

    def test_empty_2_slice(self):
        # match slice from a[:]
        test = "a[:]"
        desired = {'begin': '_beg', 'end': '_end', 'step': '_stp',
                   'single_index': '_index'}
        self.generic_check(test, desired)

    def test_begin_2_slice(self):
        # match slice from a[1:]
        test = "a[1:]"
        desired = {'begin': '1', 'end': '_end', 'step': '_stp',
                   'single_index': '_index'}
        self.generic_check(test, desired)

    def test_end_2_slice(self):
        # match slice from a[:2]
        test = "a[:2]"
        desired = {'begin': '_beg', 'end': '2', 'step': '_stp',
                   'single_index': '_index'}
        self.generic_check(test, desired)

    def test_begin_end_2_slice(self):
        # match slice from a[1:2]
        test = "a[1:2]"
        desired = {'begin': '1', 'end': '2', 'step': '_stp',
                   'single_index': '_index'}
        self.generic_check(test, desired)

    def test_empty_3_slice(self):
        # match slice from a[::]
        test = "a[::]"
        desired = {'begin': '_beg', 'end': '_end', 'step': '_stp',
                   'single_index': '_index'}
        self.generic_check(test, desired)

    def test_begin_3_slice(self):
        # match slice from a[1::]
        test = "a[1::]"
        desired = {'begin': '1', 'end': '_end', 'step': '_stp',
                   'single_index': '_index'}
        self.generic_check(test, desired)

    def test_end_3_slice(self):
        # match slice from a[:2:]
        test = "a[:2:]"
        desired = {'begin': '_beg', 'end': '2', 'step': '_stp',
                   'single_index': '_index'}
        self.generic_check(test, desired)

    def test_stp3_slice(self):
        # match slice from a[::3]
        test = "a[::3]"
        desired = {'begin': '_beg', 'end': '_end', 'step': '3',
                   'single_index': '_index'}
        self.generic_check(test, desired)

    def test_begin_end_3_slice(self):
        # match slice from a[1:2:]
        test = "a[1:2:]"
        desired = {'begin': '1', 'end': '2', 'step': '_stp',
                   'single_index': '_index'}
        self.generic_check(test, desired)

    def test_begin_step_3_slice(self):
        # match slice from a[1::3]
        test = "a[1::3]"
        desired = {'begin': '1', 'end': '_end', 'step': '3',
                   'single_index': '_index'}
        self.generic_check(test, desired)

    def test_end_step_3_slice(self):
        # match slice from a[:2:3]
        test = "a[:2:3]"
        desired = {'begin': '_beg', 'end': '2', 'step': '3',
                   'single_index': '_index'}
        self.generic_check(test, desired)

    def test_begin_end_stp3_slice(self):
        # match slice from a[1:2:3]
        test = "a[1:2:3]"
        desired = {'begin': '1', 'end': '2', 'step': '3', 'single_index': '_index'}
        self.generic_check(test, desired)

    def test_expr_3_slice(self):
        # match slice from a[:1+i+2:] -- arbitrary expressions survive as text
        test = "a[:1+i+2:]"
        desired = {'begin': '_beg', 'end': "1+i+2", 'step': '_stp',
                   'single_index': '_index'}
        self.generic_check(test, desired)

    def test_single_index(self):
        # match slice from a[0] -- plain index, no slicing at all
        test = "a[0]"
        desired = {'begin': '_beg', 'end': "_end", 'step': '_stp',
                   'single_index': '0'}
        self.generic_check(test, desired)
class TestTransformSlices(TestCase):
    """Check that transform_slices rewrites ':' subscripts into slice() calls."""

    def generic_check(self, suite_string, desired):
        """Transform *suite_string* in place and compare, ignoring whitespace."""
        tree = parser.suite(suite_string).tolist()
        slice_handler.transform_slices(tree)
        rendered = ast_to_string(tree)
        # Strip whitespace so equivalent but differently formatted
        # strings compare equal.
        assert_equal(remove_whitespace(rendered),
                     remove_whitespace(desired),
                     suite_string)

    def test_simple_expr1(self):
        # a[:] is rewritten into an explicit slice() call
        self.generic_check("a[:]", 'a[slice(_beg,_end)]')

    def test_simple_expr2(self):
        test = "a[:,:] = b[:,1:1+2:3] *(c[1-2+i:,:] - c[:,:])"
        desired = " a[slice(_beg,_end),slice(_beg,_end)] = "\
                  " b[slice(_beg,_end), slice(1,1+2-1,3)] *"\
                  " (c[slice(1-2+i,_end), slice(_beg,_end)] -"\
                  " c[slice(_beg,_end), slice(_beg,_end)])"
        self.generic_check(test, desired)
# Allow running this test module directly, outside the test collector.
if __name__ == "__main__":
    run_module_suite()
|
harshilasu/GraphicMelon | refs/heads/master | y/google-cloud-sdk/lib/jinja2/meta.py | 659 | # -*- coding: utf-8 -*-
"""
jinja2.meta
~~~~~~~~~~~
This module implements various functions that exposes information about
templates that might be interesting for various kinds of applications.
:copyright: (c) 2010 by the Jinja Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from jinja2 import nodes
from jinja2.compiler import CodeGenerator
from jinja2._compat import string_types
class TrackingCodeGenerator(CodeGenerator):
    """We abuse the code generator for introspection.

    Instead of emitting compiled template code, this subclass discards
    all output and records every identifier the compiler marks as
    undeclared, i.e. looked up from the render context at runtime.
    """

    def __init__(self, environment):
        # '<introspection>' stands in for both the template name and the
        # filename, which are irrelevant for this analysis.
        CodeGenerator.__init__(self, environment, '<introspection>',
                               '<introspection>')
        self.undeclared_identifiers = set()

    def write(self, x):
        """Don't write."""

    def pull_locals(self, frame):
        """Remember all undeclared identifiers."""
        # Called by the base generator for each frame; accumulate across frames.
        self.undeclared_identifiers.update(frame.identifiers.undeclared)
def find_undeclared_variables(ast):
    """Return the set of variables the template resolves from the context.

    Because the execution path is unknown at compile time, every variable
    that *could* be looked up at runtime is reported.

    >>> from jinja2 import Environment, meta
    >>> env = Environment()
    >>> ast = env.parse('{% set foo = 42 %}{{ bar + foo }}')
    >>> meta.find_undeclared_variables(ast)
    set(['bar'])

    .. admonition:: Implementation

        The code generator is (ab)used for the analysis, so a
        :exc:`TemplateAssertionError` raised during compilation can
        propagate out of this function as well.
    """
    tracker = TrackingCodeGenerator(ast.environment)
    tracker.visit(ast)
    return tracker.undeclared_identifiers
def find_referenced_templates(ast):
    """Finds all the referenced templates from the AST.  This will return an
    iterator over all the hardcoded template extensions, inclusions and
    imports.  If dynamic inheritance or inclusion is used, `None` will be
    yielded.

    >>> from jinja2 import Environment, meta
    >>> env = Environment()
    >>> ast = env.parse('{% extends "layout.html" %}{% include helper %}')
    >>> list(meta.find_referenced_templates(ast))
    ['layout.html', None]

    This function is useful for dependency tracking.  For example if you want
    to rebuild parts of the website after a layout template has changed.
    """
    # Every node type that can pull in another template.
    for node in ast.find_all((nodes.Extends, nodes.FromImport, nodes.Import,
                              nodes.Include)):
        if not isinstance(node.template, nodes.Const):
            # a tuple with some non consts in there
            if isinstance(node.template, (nodes.Tuple, nodes.List)):
                for template_name in node.template.items:
                    # something const, only yield the strings and ignore
                    # non-string consts that really just make no sense
                    if isinstance(template_name, nodes.Const):
                        if isinstance(template_name.value, string_types):
                            yield template_name.value
                    # something dynamic in there
                    else:
                        yield None
            # something dynamic we don't know about here
            else:
                yield None
            continue
        # constant is a basestring, direct template name
        if isinstance(node.template.value, string_types):
            yield node.template.value
        # a tuple or list (latter *should* not happen) made of consts,
        # yield the consts that are strings.  We could warn here for
        # non string values
        elif isinstance(node, nodes.Include) and \
                isinstance(node.template.value, (tuple, list)):
            for template_name in node.template.value:
                if isinstance(template_name, string_types):
                    yield template_name
        # something else we don't care about, we could warn here
        else:
            yield None
|
blaggacao/OpenUpgrade | refs/heads/8.0 | addons/account_test/__init__.py | 441 | import account_test
import report
|
tangledhelix/SublimeLinter-for-ST2 | refs/heads/master | sublimelinter/modules/objective-j.py | 9 | # objective-j.py - Lint checking for Objective-J - given filename and contents of the code:
# It provides a list of line numbers to outline and offsets to highlight.
#
# This specific module is part of the SublimeLinter project.
# It is a fork of the original SublimeLint project,
# (c) 2011 Ryan Hileman and licensed under the MIT license.
# URL: http://bochs.info/
#
# The original copyright notices for this file/project follows:
#
# (c) 2005-2008 Divmod, Inc.
# See LICENSE file for details
#
# The LICENSE file is as follows:
#
# Copyright (c) 2005 Divmod, Inc., http://www.divmod.com/
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from capp_lint import LintChecker
from base_linter import BaseLinter
# Linter metadata consumed by the SublimeLinter plugin loader.
CONFIG = {
    'language': 'Objective-J'
}
class Linter(BaseLinter):
    """Objective-J linter backed by capp_lint's LintChecker."""

    def built_in_check(self, view, code, filename):
        """Run capp_lint over *code* and return the collected error dicts."""
        checker = LintChecker(view)
        checker.lint_text(code, filename)
        return checker.errors

    def parse_errors(self, view, errors, lines, errorUnderlines, violationUnderlines, warningUnderlines, errorMessages, violationMessages, warningMessages):
        """Route each capp_lint error into the message/underline collections.

        Bug fix: the original compared the *builtin* ``type`` against
        ``LintChecker.ERROR_TYPE_ILLEGAL``, which is always False, so
        every error was filed as a warning.  The per-error severity is
        carried in ``error['type']``.
        """
        for error in errors:
            lineno = error['lineNum']
            is_illegal = error['type'] == LintChecker.ERROR_TYPE_ILLEGAL
            self.add_message(lineno, lines, error['message'],
                             errorMessages if is_illegal else warningMessages)
            for position in error.get('positions', []):
                self.underline_range(view, lineno, position,
                                     errorUnderlines if is_illegal else warningUnderlines)
|
mick-d/nipype_source | refs/heads/master | nipype/interfaces/fsl/tests/test_auto_ContrastMgr.py | 5 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.fsl.model import ContrastMgr
def test_ContrastMgr_inputs():
    # AUTO-GENERATED (tools/checkspecs.py): verifies every trait metadata
    # entry of ContrastMgr's input spec; regenerate rather than hand-edit.
    input_map = dict(args=dict(argstr='%s',
                               ),
                     contrast_num=dict(argstr='-cope',
                                       ),
                     corrections=dict(copyfile=False,
                                      mandatory=True,
                                      ),
                     dof_file=dict(argstr='',
                                   copyfile=False,
                                   mandatory=True,
                                   ),
                     environ=dict(nohash=True,
                                  usedefault=True,
                                  ),
                     fcon_file=dict(argstr='-f %s',
                                    ),
                     ignore_exception=dict(nohash=True,
                                           usedefault=True,
                                           ),
                     output_type=dict(),
                     param_estimates=dict(argstr='',
                                          copyfile=False,
                                          mandatory=True,
                                          ),
                     sigmasquareds=dict(argstr='',
                                        copyfile=False,
                                        mandatory=True,
                                        position=-2,
                                        ),
                     suffix=dict(argstr='-suffix %s',
                                 ),
                     tcon_file=dict(argstr='%s',
                                    mandatory=True,
                                    position=-1,
                                    ),
                     terminal_output=dict(mandatory=True,
                                          nohash=True,
                                          ),
                     )
    inputs = ContrastMgr.input_spec()

    # Nose-style generator test: one yielded assertion per metadata key.
    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_ContrastMgr_outputs():
    # AUTO-GENERATED (tools/checkspecs.py): checks that ContrastMgr's output
    # spec exposes exactly these traits; regenerate rather than hand-edit.
    output_map = dict(copes=dict(),
                      fstats=dict(),
                      neffs=dict(),
                      tstats=dict(),
                      varcopes=dict(),
                      zfstats=dict(),
                      zstats=dict(),
                      )
    outputs = ContrastMgr.output_spec()

    # Nose-style generator test: one yielded assertion per metadata key.
    for key, metadata in output_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
|
ekampf/webapp2_requestparser | refs/heads/master | tests/reqparse/arguments/__init__.py | 14224 | # -*- coding: utf-8 -*-
|
chaowyc/youtube-dl | refs/heads/master | youtube_dl/extractor/libsyn.py | 106 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import unified_strdate
class LibsynIE(InfoExtractor):
    """Extractor for podcast episodes served by the html5-player.libsyn.com embed."""

    _VALID_URL = r'(?P<mainurl>https?://html5-player\.libsyn\.com/embed/episode/id/(?P<id>[0-9]+))'

    _TESTS = [{
        'url': 'http://html5-player.libsyn.com/embed/episode/id/3377616/',
        'md5': '443360ee1b58007bc3dcf09b41d093bb',
        'info_dict': {
            'id': '3377616',
            'ext': 'mp3',
            'title': "The Daily Show Podcast without Jon Stewart - Episode 12: Bassem Youssef: Egypt's Jon Stewart",
            'description': 'md5:601cb790edd05908957dae8aaa866465',
            'upload_date': '20150220',
            'thumbnail': 're:^https?://.*',
        },
    }, {
        'url': 'https://html5-player.libsyn.com/embed/episode/id/3727166/height/75/width/200/theme/standard/direction/no/autoplay/no/autonext/no/thumbnail/no/preload/no/no_addthis/no/',
        'md5': '6c5cb21acd622d754d3b1a92b582ce42',
        'info_dict': {
            'id': '3727166',
            'ext': 'mp3',
            'title': 'Clients From Hell Podcast - How a Sex Toy Company Kickstarted my Freelance Career',
            'upload_date': '20150818',
            'thumbnail': 're:^https?://.*',
        }
    }]

    def _real_extract(self, url):
        """Download the embed page and scrape media URLs plus metadata.

        Fix: the mediaURL pattern is now a raw string.  The original plain
        string contained an unrecognized backslash escape, which emits a
        DeprecationWarning on modern Pythons (and becomes an error later).
        """
        m = re.match(self._VALID_URL, url)
        video_id = m.group('id')
        url = m.group('mainurl')
        webpage = self._download_webpage(url, video_id)

        # The page may declare the same media URL several times; de-duplicate.
        formats = [{
            'url': media_url,
        } for media_url in set(re.findall(r'var\s+mediaURL(?:Libsyn)?\s*=\s*"([^"]+)"', webpage))]

        podcast_title = self._search_regex(
            r'<h2>([^<]+)</h2>', webpage, 'podcast title', default=None)
        episode_title = self._search_regex(
            r'(?:<div class="episode-title">|<h3>)([^<]+)</', webpage, 'episode title')

        # Prefix the episode title with the podcast name when available.
        title = '%s - %s' % (podcast_title, episode_title) if podcast_title else episode_title

        description = self._html_search_regex(
            r'<div id="info_text_body">(.+?)</div>', webpage,
            'description', default=None)
        thumbnail = self._search_regex(
            r'<img[^>]+class="info-show-icon"[^>]+src="([^"]+)"',
            webpage, 'thumbnail', fatal=False)
        release_date = unified_strdate(self._search_regex(
            r'<div class="release_date">Released: ([^<]+)<', webpage, 'release date', fatal=False))

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'upload_date': release_date,
            'formats': formats,
        }
|
lmyrefelt/CouchPotatoServer | refs/heads/develop | couchpotato/core/plugins/userscript/main.py | 1 | from couchpotato import index
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, addEvent
from couchpotato.core.helpers.variable import isDict
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.environment import Env
from tornado.web import RequestHandler
import os
log = CPLog(__name__)
class Userscript(Plugin):
    """Serves the CouchPotato userscript and handles movie adds coming from it."""

    # Base version; provider versions are added in getVersion() so the
    # cached script invalidates when any provider changes.
    version = 3

    def __init__(self):
        # Register API routes and the version event handler.
        addApiView('userscript.get/(.*)/(.*)', self.getUserScript, static = True)
        addApiView('userscript', self.iFrame)
        addApiView('userscript.add_via_url', self.getViaUrl)
        addApiView('userscript.includes', self.getIncludes)
        addApiView('userscript.bookmark', self.bookmark)

        addEvent('userscript.get_version', self.getVersion)

    def bookmark(self, host = None, **kwargs):
        """Render the bookmarklet JS with the current include/exclude lists."""
        params = {
            'includes': fireEvent('userscript.get_includes', merge = True),
            'excludes': fireEvent('userscript.get_excludes', merge = True),
            'host': host,
        }

        return self.renderTemplate(__file__, 'bookmark.js', **params)

    def getIncludes(self):
        """Return the merged include/exclude URL patterns of all providers."""
        return {
            'includes': fireEvent('userscript.get_includes', merge = True),
            'excludes': fireEvent('userscript.get_excludes', merge = True),
        }

    def getUserScript(self, route):
        """Register a tornado handler that renders and caches the userscript."""
        klass = self

        class UserscriptHandler(RequestHandler):

            def get(self, random, route):
                params = {
                    'includes': fireEvent('userscript.get_includes', merge = True),
                    'excludes': fireEvent('userscript.get_excludes', merge = True),
                    'version': klass.getVersion(),
                    'api': '%suserscript/' % Env.get('api_base'),
                    'host': '%s://%s' % (self.request.protocol, self.request.host),
                }

                script = klass.renderTemplate(__file__, 'template.js', **params)
                klass.createFile(os.path.join(Env.get('cache_dir'), 'couchpotato.user.js'), script)

                self.redirect(Env.get('api_base') + 'file.cache/couchpotato.user.js')

        Env.get('app').add_handlers(".*$", [('%s%s' % (Env.get('api_base'), route), UserscriptHandler)])

    def getVersion(self):
        """Return base version plus the sum of all provider versions."""
        versions = fireEvent('userscript.get_provider_version')

        version = self.version
        for v in versions:
            version += v

        return version

    def iFrame(self, **kwargs):
        return index()

    def getViaUrl(self, url = None, **kwargs):
        """Look up movie info for a URL posted by the userscript.

        Fix: removed a stray Python-2 ``print url`` debug statement that
        dumped every request URL to stdout (and is a syntax error on
        Python 3); use the module logger instead.
        """
        log.debug('Adding movie via url: %s', url)

        params = {
            'url': url,
            'movie': fireEvent('userscript.get_movie_via_url', url = url, single = True)
        }
        if not isDict(params['movie']):
            log.error('Failed adding movie via url: %s', url)
            params['error'] = params['movie'] if params['movie'] else 'Failed getting movie info'

        return params
|
botswana-harvard/edc-tracker | refs/heads/develop | edc_quota/controller/models.py | 2 | from django.db import models
from django.utils import timezone
class ControllerQuota(models.Model):
    """Controllers quota model where each instance refers to a quota that
    this controller is managing.

    For example, a quota on the controller might be an enrollment cap
    (target) of 3000 applied to the model
    subject.Enrollment."""

    # app_label/model_name identify the quota'd model on the clients.
    app_label = models.CharField(max_length=25)

    model_name = models.CharField(max_length=25)

    # Overall cap for the model count across all clients.
    target = models.IntegerField()

    start_date = models.DateField()

    expiration_date = models.DateField()

    # Optional per-client ceiling when distributing the target.
    max_allocation = models.IntegerField(
        blank=True,
        null=True)

    is_active = models.BooleanField(default=True)

    def __str__(self):
        return self.model_name

    class Meta:
        app_label = 'edc_quota'
class ControllerQuotaHistory(models.Model):
    """Controllers quota history model.

    A new instance is created each time the controller updates quotas on
    the client for 'quota'."""

    # NOTE(review): for Django's ``objects.latest()`` to honor this,
    # ``get_latest_by`` must be declared on the inner ``Meta`` class; as a
    # plain model attribute it has no effect — confirm and move if intended.
    get_latest_by = "quota_datetime"

    quota = models.ForeignKey(ControllerQuota)

    # Aggregated model count reported by the clients at update time.
    model_count = models.IntegerField(
        editable=False,
        default=0)

    # Comma-separated list of client hostnames reached in this round.
    clients_contacted = models.CharField(
        max_length=500,
        editable=False,
        null=True)

    start_date = models.DateField(
        editable=False,
        null=True)

    expiration_date = models.DateField(
        editable=False,
        null=True)

    contacted = models.DateTimeField(
        editable=False,
        null=True)

    quota_datetime = models.DateTimeField(
        editable=False,
        default=timezone.now)

    def __str__(self):
        return str(self.quota)

    @property
    def clients_contacted_list(self):
        """Return ``clients_contacted`` split into a list (empty if unset)."""
        clients_contacted = []
        if self.clients_contacted:
            clients_contacted = self.clients_contacted.split(',')
        return clients_contacted

    class Meta:
        app_label = 'edc_quota'
        ordering = ('-quota_datetime', )
class Client(models.Model):
    """Clients to populate registry on the Controller."""

    hostname = models.CharField(max_length=25)

    port = models.IntegerField(default=80)

    # REST API version segment used when building the client URLs.
    api_name = models.CharField(max_length=25, default='v1')

    app_label = models.CharField(max_length=25)

    model_name = models.CharField(max_length=25)

    # Portion of the controller quota allocated to this client.
    target = models.IntegerField(
        editable=False,
        default=0)

    model_count = models.IntegerField(
        editable=False,
        default=0)

    start_date = models.DateField(
        editable=False,
        null=True)

    expiration_date = models.DateField(
        editable=False,
        null=True)

    contacted = models.DateTimeField(
        editable=False,
        null=True)

    is_active = models.BooleanField(default=True)

    def __str__(self):
        return "{}".format(self.hostname)

    def quota(self):
        # NOTE(review): raises ControllerQuota.DoesNotExist when no matching
        # quota row exists — callers appear to rely on one being configured.
        return ControllerQuota.objects.get(app_label=self.app_label, model_name=self.model_name)
    quota.short_description = 'Quota'

    @property
    def url(self):
        """GET endpoint on the client for reading the quota resource."""
        return 'http://{}:{}/edc_quota/api/{}/?app_label={}&model_name={}&format=json'.format(
            self.hostname, self.port, self.api_name, self.app_label, self.model_name)

    @property
    def post_url(self):
        """POST endpoint on the client for writing the quota resource."""
        return 'http://{}:{}/edc_quota/api/{}/{}/'.format(
            self.hostname, self.port, self.api_name, self.model_name.lower())

    @property
    def name(self):
        return self.hostname

    class Meta:
        app_label = 'edc_quota'
|
jgonthier/psi4 | refs/heads/master | psi4/driver/qcdb/libmintsmolecule.py | 3 | #
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2019 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import os
import re
import copy
import math
import collections
import numpy as np
import qcelemental as qcel
from .vecutil import *
from .exceptions import *
from .libmintscoordentry import NumberValue, VariableValue, CartesianEntry, ZMatrixEntry
from .libmintspointgrp import SymmOps, similar, SymmetryOperation, PointGroup
# Numerical tolerances used by the molecule/symmetry machinery below.
LINEAR_A_TOL = 1.0E-2  # When sin(a) is below this, we consider the angle to be linear
DEFAULT_SYM_TOL = 1.0E-8  # presumably the default symmetry-detection tolerance — usage is outside this chunk
FULL_PG_TOL = 1.0e-8  # presumably the full point-group assignment tolerance — usage is outside this chunk
ZERO = 1.0E-14  # tight numerical zero
NOISY_ZERO = 1.0E-8  # looser zero for noisy (computed) values
class LibmintsMolecule():
"""Class to store the elements, coordinates, fragmentation pattern,
charge, multiplicity of a molecule. Largely replicates psi4's libmints
Molecule class, developed by Justin M. Turney and Andy M. Simmonett
with incremental improvements by other psi4 developers.
Roughly, this class mirrors `psi4.core.Molecule`; however, it's never
used directly and instead serves as a base class for `qcdb.Molecule`
in alongside-file molecule.py.
>>> H2OH2O = qcdb.Molecule(\"\"\"
0 1
O1 -1.551007 -0.114520 0.000000
H1 -1.934259 0.762503 0.000000
H2 -0.599677 0.040712 0.000000
--
0 1
X 0.000000 0.000000 0.000000
O2 1.350625 0.111469 0.000000
H3 1.680398 -0.373741 -0.758561
H4 1.680398 -0.373741 0.758561
no_com
no_reorient
units angstrom
\"\"\")
>>> H2O = qcdb.Molecule.init_with_xyz('h2o.xyz')
"""
FullPointGroupList = ["ATOM", "C_inf_v", "D_inf_h", "C1", "Cs", "Ci", \
"Cn", "Cnv", "Cnh", "Sn", "Dn", "Dnd", "Dnh", "Td", "Oh", "Ih"]
    def __init__(self):
        """Initialize Molecule object from string in psi4 format.

        Sets every attribute to its empty/default value; actual geometry,
        fragmentation and symmetry data are filled in by later calls.
        """

        # <<< Basic Molecule Information >>>

        # Molecule (or fragment) name
        self.PYname = 'default'
        # Molecule comment
        self.PYcomment = ''
        # Molecule origin
        self.PYprovenance = []
        # Molecule connectivity
        self.PYconnectivity = []
        # The molecular charge
        self.PYmolecular_charge = 0
        # The multiplicity (defined as 2Ms + 1)
        self.PYmultiplicity = 1
        # The units used to define the geometry
        self.PYunits = 'Angstrom'
        # The conversion factor to take input units to Bohr
        self.PYinput_units_to_au = 1.0 / qcel.constants.bohr2angstroms
        # Whether this molecule has at least one zmatrix entry
        self.zmat = False  # TODO None?
        # Whether this molecule has at least one Cartesian entry
        self.cart = False  # TODO None?

        # <<< Coordinates >>>

        # Atom info vector (no knowledge of dummy atoms)
        self.atoms = []
        # Atom info vector (includes dummy atoms)
        self.full_atoms = []
        # A list of all variables known, whether they have been set or not.
        self.all_variables = []
        # A listing of the variables used to define the geometries
        self.geometry_variables = {}
        # Limited lifetime efficiency boost
        self.wholegeom = None

        # <<< Fragmentation >>>

        # The list of atom ranges defining each fragment from parent molecule
        self.fragments = []
        # A list describing how to handle each fragment
        self.fragment_types = []
        # The charge of each fragment
        self.fragment_charges = []
        # The multiplicity of each fragment
        self.fragment_multiplicities = []

        # <<< Frame >>>

        # Move to center of mass or not?
        self.PYmove_to_com = True
        # Reorient or not? UNUSED
        self.PYfix_orientation = False
        # Reinterpret the coord entries or not (Default is true, except for findif)
        self.PYreinterpret_coordentries = True
        # Nilpotence boolean (flagged upon first determination of symmetry frame,
        # reset each time a substantiative change is made)
        self.lock_frame = False

        # <<< Symmetry >>>

        # Point group to use with this molecule
        self.pg = None
        # Full point group
        self.full_pg = 'C1'
        # n of the highest rotational axis Cn
        self.PYfull_pg_n = 1
        # Symmetry string from geometry specification
        self.PYsymmetry_from_input = None
        # Number of unique atoms
        self.PYnunique = 0
        # Number of equivalent atoms per unique atom (length nunique)
        self.nequiv = 0
        # Equivalent atom mapping array (length 1st dim nunique)
        self.equiv = 0
        # Atom to unique atom mapping array (length natom)
        self.PYatom_to_unique = 0
# <<< Simple Methods for Basic Molecule Information >>>
def name(self):
"""Get molecule name
>>> print(H2OH2O.name())
water_dimer
"""
return self.PYname
def set_name(self, name):
"""Set molecule name
>>> H2OH2O.set_name('water_dimer')
"""
self.PYname = name
def comment(self):
"""Get molecule comment
>>> print(H2OH2O.comment())
I am S22-2
"""
return self.PYcomment
def set_comment(self, comment):
"""Set molecule comment
>>> H2OH2O.set_comment('I am S22-2')
"""
self.PYcomment = comment
def provenance(self):
"""Get molecule provenance
>>> print(H2OH2O.provenance())
{'creator': 'QCElemental',
'routine': 'qcelemental.molparse.from_arrays',
'version': 'v0.1.0a+8.g465f4e3'}
"""
return copy.deepcopy(self.PYprovenance)
def set_provenance(self, provenance):
"""Set molecule provenance
>>> H2OH2O.set_provenance('water_dimer')
"""
self.PYprovenance = provenance
def connectivity(self):
"""Get molecule connectivity
>>> print(H2OH2O.connectivity())
[(0, 1, 1.0), (0, 2, 1.0)]
"""
return copy.deepcopy(self.PYconnectivity)
def set_connectivity(self, connectivity):
"""Set molecule connectivity
>>> H2OH2O.set_connectivity([(0, 1, 1.0), (0, 2, 1.0)])
"""
self.PYconnectivity = connectivity
def natom(self):
"""Number of atoms
>>> print(H2OH2O.natom())
6
"""
return len(self.atoms)
def nallatom(self):
"""Number of all atoms (includes dummies)
>>> print(H2OH2O.nallatom())
7
"""
return len(self.full_atoms)
def molecular_charge(self):
"""Gets the molecular charge
>>> print(H2OH2O.molecular_charge())
-2
"""
return self.PYmolecular_charge
def set_molecular_charge(self, charge):
"""Sets the molecular charge
>>> H2OH2O.set_molecular_charge(-2)
"""
if not float(charge).is_integer():
raise ValidationError('System charge must be integer: {}'.format(charge))
self.PYcharge_specified = True
self.PYmolecular_charge = int(charge)
def multiplicity(self):
"""Get the multiplicity (defined as 2Ms + 1)
>>> print(H2OH2O.multiplicity())
"""
return self.PYmultiplicity
def set_multiplicity(self, mult):
"""Sets the multiplicity (defined as 2Ms + 1)
>>> H2OH2O.set_multiplicity(3)
"""
if not float(mult).is_integer() or float(mult) < 0.0:
raise ValidationError('System multiplicity must be positive integer: {}'.format(mult))
self.PYmultiplicity_specified = True
self.PYmultiplicity = int(mult)
def units(self):
"""Gets the geometry units
>>> print(H2OH2O.units())
Angstrom
"""
return self.PYunits
def set_units(self, units):
"""Sets the geometry units (constructor use).
Parameters
----------
units : {'Angstrom', 'Bohr'}
Units of input geometry.
Returns
-------
None
Examples
--------
# [1]
>>> H2OH2O.set_units('Angstrom')
"""
if units == 'Angstrom':
self.PYunits = units
self.PYinput_units_to_au = 1.0 / qcel.constants.bohr2angstroms
elif units == 'Bohr':
self.PYunits = units
self.PYinput_units_to_au = 1.0
else:
raise ValidationError("""Molecule::set_units: argument must be 'Angstrom' or 'Bohr'.""")
def input_units_to_au(self):
"""Gets the geometry unit conversion."""
return self.PYinput_units_to_au
def set_input_units_to_au(self, conv):
"""Sets the geometry unit conversion. May be used to override internal a2b physconst"""
if abs(conv - self.PYinput_units_to_au) < 0.05:
self.PYinput_units_to_au = conv
else:
raise ValidationError("""No big perturbations to physical constants!""")
def has_zmatrix(self):
"""Gets the presence of any zmatrix entry
>>> print(H2OH2O.has_zmatrix())
False
"""
return self.zmat
def set_has_zmatrix(self, tf):
"""Sets the presence of any zmatrix entry
>>> H2OH2O.set_has_zmatrix(True)
"""
self.zmat = tf
def has_cartesian(self):
"""Gets the presence of any Cartesian entry
>>> print(H2OH2O.has_cartesian())
False
"""
return self.cart
def set_has_cartesian(self, tf):
"""Sets the presence of any Cartesian entry
>>> H2OH2O.set_has_cartesian(True)
"""
self.cart = tf
# <<< Simple Methods for Coordinates >>>
def Z(self, atom):
"""Nuclear charge of atom (0-indexed)
>>> print(H2OH2O.Z(4))
1
"""
return self.atoms[atom].Z()
def x(self, atom):
"""x position of atom (0-indexed) in Bohr
>>> print(H2OH2O.x(4))
3.17549201425
"""
return self.input_units_to_au() * self.atoms[atom].compute()[0]
def y(self, atom):
"""y position of atom (0-indexed) in Bohr
>>> print(H2OH2O.y(4))
-0.706268134631
"""
return self.input_units_to_au() * self.atoms[atom].compute()[1]
def z(self, atom):
"""z position of atom (0-indexed) in Bohr
>>> print(H2OH2O.z(4))
-1.43347254509
"""
return self.input_units_to_au() * self.atoms[atom].compute()[2]
def xyz(self, atom, np_out=False):
"""Returns a Vector3 with x, y, z position of atom (0-indexed)
in Bohr or coordinate at *posn*
>>> print(H2OH2O.xyz(4))
[3.175492014248769, -0.7062681346308132, -1.4334725450878665]
"""
xyz = self.input_units_to_au() * np.asarray(self.atoms[atom].compute())
if np_out:
return xyz
else:
return xyz.tolist()
def mass(self, atom):
"""Returns mass of atom (0-indexed)
>>> print(H2OH2O.mass(4))
1.00782503207
"""
if self.atoms[atom].mass() != 0.0:
return self.atoms[atom].mass()
if math.fabs(self.atoms[atom].Z() - int(self.atoms[atom].Z())) > 0.0:
print("""WARNING: Obtaining masses from atom with fractional charge...may be incorrect!!!\n""")
# TODO outfile
return qcel.periodictable.to_mass(int(self.atoms[atom].Z()))
def set_mass(self, atom, mass):
"""Set the mass of a particular atom (good for isotopic substitutions).
Parameters
----------
atom : int
0-indexed, dummy-inclusive atom index to set.
mass : float
Non-negative mass in [u] for `atom`.
Returns
-------
None
"""
mass = float(mass)
if mass < 0.0:
raise ValidationError('Mass must be positive: {}'.format(mass))
self.lock_frame = False
self.full_atoms[atom].set_mass(mass)
self.full_atoms[atom].set_A(-1)
def symbol(self, atom):
"""Returns the cleaned up label of the atom (C2 => C, H4 = H) (0-indexed)
>>> print(H2OH2O.symbol(4))
H
"""
return self.atoms[atom].symbol()
def label(self, atom):
"""Returns the original label of the atom (0-indexed) as given in the input file (C2, H4). (0-indexed)
>>> print(H2OH2O.label(4))
H3
"""
return self.atoms[atom].label()
def charge(self, atom):
"""Returns charge of atom (0-indexed).
Related to SAD guess in libmints version.
>>> print(H2OH2O.charge(4))
1.0
"""
return self.atoms[atom].charge()
def mass_number(self, atom):
"""Mass number (A) of atom (0-indexed)
>>> print(H2OH2O.mass_number(4))
1
"""
return self.atoms[atom].A()
def fZ(self, atom):
"""Nuclear charge of atom (includes dummies)
>>> print(H2OH2O.fZ(4))
8
"""
return self.full_atoms[atom].Z()
def fx(self, atom):
"""x position of atom (0-indexed, includes dummies) in Bohr
>>> print(H2OH2O.fx(4))
2.55231135823
"""
return self.input_units_to_au() * self.full_atoms[atom].compute()[0]
def fy(self, atom):
"""y position of atom (0-indexed, includes dummies) in Bohr
>>> print(H2OH2O.fy(4))
0.210645882307
"""
return self.input_units_to_au() * self.full_atoms[atom].compute()[1]
def fz(self, atom):
"""z position of atom (0-indexed, includes dummies) in Bohr
>>> print(H2OH2O.fz(4))
0.0
"""
return self.input_units_to_au() * self.full_atoms[atom].compute()[2]
def fxyz(self, atom):
"""Returns a Vector3 with x, y, z position of atom
(0-indexed) in Bohr (includes dummies)
>>> print(H2OH2O.fxyz(4))
[2.5523113582286716, 0.21064588230662976, 0.0]
"""
return scale(self.full_atoms[atom].compute(), self.input_units_to_au())
def fmass(self, atom):
"""Returns mass of atom (0-indexed, includes dummies)
>>> print(H2OH2O.fmass(4))
15.9949146196
"""
return self.full_atoms[atom].mass()
def fsymbol(self, atom):
"""Returns the cleaned up label of the atom (C2 => C, H4 = H) (includes dummies) (0-indexed)
>>> print(H2OH2O.fsymbol(4))
O
"""
return self.full_atoms[atom].symbol()
def flabel(self, atom):
"""Returns the original label of the atom (0-indexed) as given in
the input file (C2, H4) (includes dummies)
>>> print(H2OH2O.flabel(4))
O2
"""
return self.full_atoms[atom].label()
def fcharge(self, atom):
"""Returns charge of atom (0-indexed, includes dummies).
Related to SAD guess in libmints version.
>>> print(H2OH2O.fcharge(4))
8.0
"""
return self.full_atoms[atom].charge()
def fmass_number(self, atom):
"""Mass number of atom (0-indexed)
>>> print(H2OH2O.fmass_number(4))
1
"""
return self.full_atoms[atom].A()
# <<< Simple Methods for Fragmentation >>>
def nfragments(self):
"""The number of fragments in the molecule.
>>> print(H2OH2O.nfragments())
2
"""
return len(self.fragments)
def nactive_fragments(self):
"""The number of active fragments in the molecule.
>>> print(H2OH2O.nactive_fragments())
2
"""
n = 0
for fr in range(self.nfragments()):
if self.fragment_types[fr] == 'Real':
n += 1
return n
def activate_all_fragments(self):
"""Sets all fragments in the molecule to be active."""
self.lock_frame = False
for fr in range(self.nfragments()):
self.fragment_types[fr] = 'Real'
def set_active_fragment(self, fr):
"""Tags fragment index *fr* as composed of real atoms."""
self.lock_frame = False
self.fragment_types[fr - 1] = 'Real'
def set_active_fragments(self, reals):
"""Tags the fragments in array *reals* as composed of real atoms."""
self.lock_frame = False
for fr in reals:
self.fragment_types[fr - 1] = 'Real'
def set_ghost_fragment(self, fr):
"""Tags fragment index *fr* as composed of ghost atoms."""
self.lock_frame = False
self.fragment_types[fr - 1] = 'Ghost'
def set_ghost_fragments(self, ghosts):
"""Tags the fragments in array *ghosts* as composed of ghost atoms."""
self.lock_frame = False
for fr in ghosts:
self.fragment_types[fr - 1] = 'Ghost'
def deactivate_all_fragments(self):
"""Sets all fragments in the molecule to be inactive."""
self.lock_frame = False
for fr in range(self.nfragments()):
self.fragment_types[fr] = 'Absent'
def extract_subsets(self, reals, ghosts=[]):
"""Wrapper for :py:func:`~qcdb.molecule.extract_fragments`.
See note there. This function can be used as long as not
in psi4 input file. Use extract_fragments directly, then.
>>> H2OH2O.extract_subsets(2) # monomer B, unCP-corrected
>>> H2OH2O.extract_subsets(2,1) # monomer B, CP-corrected
>>> obj.extract_subsets(1,[2,3]) # monomer A, CP-corrected if obj is tri-molecular complex
"""
return self.extract_fragments(reals, ghosts=ghosts)
def extract_fragments(self, reals, ghosts=[]):
"""Makes a copy of the molecule, returning a new molecule with
only certain fragment atoms present as either ghost or real atoms
*reals*: The list or int of fragments (1-indexed) that should be present in the molecule as real atoms.
*ghosts*: The list or int of fragments (1-indexed) that should be present in the molecule as ghosts.
(method name in libmints is extract_subsets. This is different
in qcdb because the psi4 input parser tries to process lines with
that term, giving rise to Boost:Python type conlicts.) See usage
at :py:func:`~qcdb.molecule.extract_fragments`.
"""
lreals = []
try:
for idx in reals:
lreals.append(idx - 1)
except TypeError:
lreals = [reals - 1]
lghosts = []
try:
for idx in ghosts:
lghosts.append(idx - 1)
except TypeError:
lghosts = [ghosts - 1]
if len(lreals) + len(lghosts) > self.nfragments():
raise ValidationError('Molecule::extract_fragments: sum of real- and ghost-atom subsets is greater than the number of subsets')
subset = self.clone()
subset.deactivate_all_fragments()
for fr in lreals:
subset.set_active_fragment(fr + 1) # the active fragment code subtracts 1
for fr in lghosts:
subset.set_ghost_fragment(fr + 1) # the ghost fragment code subtracts 1
subset.update_geometry()
return subset
def get_fragments(self):
"""The list of atom ranges defining each fragment from parent molecule.
Returns
-------
list of lists
(nfr, 2) actual member data, for constructor use only.
"""
return self.fragments
def get_fragment_types(self):
"""A list describing how to handle each fragment.
Returns
-------
list
(nfr, ) actual member data, for constructor use only.
"""
return self.fragment_types
def get_fragment_charges(self):
"""The charge of each fragment.
Returns
-------
list
(nfr, ) actual member data, for constructor use only.
"""
return self.fragment_charges
def get_fragment_multiplicities(self):
"""The multiplicity of each fragment.
Returns
-------
list
(nfr, ) actual member data, for constructor use only.
"""
return self.fragment_multiplicities
# <<< Methods for Construction >>>
    def create_molecule_from_string(self, text):
        """Deprecated constructor helper: formerly built a molecule from a
        psi4-style geometry string *text* (newline-separated lines).

        Raises
        ------
        FeatureDeprecated
            Always; use ``qcdb.Molecule.from_string(..., dtype='psi4+')`` instead.
        """
        raise FeatureDeprecated("qcdb.Molecule.create_molecule_from_string. Replace with qcdb.Molecule.from_string(..., dtype='psi4+')")
    def init_with_checkpoint(self, chkpt):
        """**NYI** Pull information from the *chkpt* object passed.
        (method name in libmints is init_with_chkpt)

        Raises
        ------
        FeatureNotImplemented
            Always; this entry point was never ported.
        """
        raise FeatureNotImplemented('Molecule::init_with_checkpoint')  # FINAL
    def init_with_io(self, psio):
        """**NYI** Pull information from a chkpt object created from *psio*.
        (method name in libmints is init_with_psio)

        Raises
        ------
        FeatureNotImplemented
            Always; this entry point was never ported.
        """
        raise FeatureNotImplemented('Molecule::init_with_io')  # FINAL
def clone(self):
"""Returns new, independent Molecule object.
>>> dimer = H2OH2O.clone()
"""
return copy.deepcopy(self)
# <<< Methods for Printing >>>
    def print_out(self):
        """Print a formatted geometry table to stdout in the molecule's native
        units, followed by a per-atom basis-set listing.
        (method name in libmints is print)

        Point-group lines are emitted only when ``self.pg`` / ``self.full_pg``
        are set. Ghost atoms (Z == 0) are rendered as ``Gh(symbol)``; dummies
        are excluded because only ``self.atoms`` is traversed.
        """
        text = ""
        if self.natom():
            if self.pg:
                text += """ Molecular point group: %s\n""" % (self.pg.symbol())
            if self.full_pg:
                text += """ Full point group: %s\n\n""" % (self.get_full_point_group())
            text += """ Geometry (in %s), charge = %d, multiplicity = %d:\n\n""" % \
                ('Angstrom' if self.units() == 'Angstrom' else 'Bohr', self.molecular_charge(), self.multiplicity())
            text += """ Center X Y Z \n"""
            text += """ ------------ ----------------- ----------------- -----------------\n"""
            for i in range(self.natom()):
                # compute() yields coordinates in input units; printed as-is (no Bohr conversion here)
                geom = self.atoms[i].compute()
                text += """ %3s%-7s """ % ("" if self.Z(i) else "Gh(", self.symbol(i) + ("" if self.Z(i) else ")"))
                for j in range(3):
                    text += """ %17.12f""" % (geom[j])
                text += "\n"
            # TODO if (Process::environment.options.get_int("PRINT") > 2) {
            text += "\n"
            # Second pass: isotope-prefixed label plus any basis sets/shells per atom.
            for i in range(self.natom()):
                Astr = '' if self.mass_number(i) == -1 else str(self.mass_number(i))
                text += """ %8s\n""" % (Astr + self.label(i))
                for bas in self.atoms[i].basissets().keys():
                    text += """ %-15s %-20s""" % (bas,
                        self.atoms[i].basissets()[bas])
                    if bas in self.atoms[i].shells():
                        text += """%s""" % (self.atoms[i].shells()[bas])
                    text += '\n'
            text += "\n"
        else:
            text += " No atoms in this molecule.\n"
        print(text)
        # TODO outfile
def print_out_in_bohr(self):
"""Print the molecule in Bohr. Same as :py:func:`print_out` only in Bohr.
(method name in libmints is print_in_bohr)
"""
text = ""
if self.natom():
if self.pg:
text += """ Molecular point group: %s\n""" % (self.pg.symbol())
if self.full_pg:
text += """ Full point group: %s\n\n""" % (self.get_full_point_group())
text += """ Geometry (in %s), charge = %d, multiplicity = %d:\n\n""" % \
('Bohr', self.molecular_charge(), self.multiplicity())
text += """ Center X Y Z \n"""
text += """ ------------ ----------------- ----------------- -----------------\n"""
for i in range(self.natom()):
text += """ %3s%-7s """ % ("" if self.Z(i) else "Gh(", self.symbol(i) + ("" if self.Z(i) else ")"))
text += (""" %17.12f""" * 3).format(*(self.xyz(i)))
text += "\n"
text += "\n"
else:
text += " No atoms in this molecule.\n"
print(text)
# TODO outfile
def print_out_in_angstrom(self):
"""Print the molecule in Angstroms. Same as :py:func:`print_out` only always in Angstroms.
(method name in libmints is print_in_angstrom)
"""
text = ""
if self.natom():
if self.pg:
text += """ Molecular point group: %s\n""" % (self.pg.symbol())
if self.full_pg:
text += """ Full point group: %s\n\n""" % (self.get_full_point_group())
text += """ Geometry (in %s), charge = %d, multiplicity = %d:\n\n""" % \
('Angstrom', self.molecular_charge(), self.multiplicity())
text += """ Center X Y Z \n"""
text += """ ------------ ----------------- ----------------- -----------------\n"""
for i in range(self.natom()):
text += """ %3s%-7s """ % ("" if self.Z(i) else "Gh(", self.symbol(i) + ("" if self.Z(i) else ")"))
text += (""" %17.12f""" * 3).format(*self.xyz(i) * qcel.constants.bohr2angstroms)
text += "\n"
text += "\n"
else:
text += " No atoms in this molecule.\n"
print(text)
# TODO outfile
    def print_full(self):
        """Print the full atom list to stdout — same as :py:func:`print_out`
        except dummy atoms are included (iterates ``self.full_atoms``) and the
        header reports the molecule's native units verbatim.
        """
        text = ""
        if self.natom():
            if self.pg:
                text += """ Molecular point group: %s\n""" % (self.pg.symbol())
            if self.full_pg:
                text += """ Full point group: %s\n\n""" % (self.get_full_point_group())
            text += """ Geometry (in %s), charge = %d, multiplicity = %d:\n\n""" % \
                (self.units(), self.molecular_charge(), self.multiplicity())
            text += """ Center X Y Z \n"""
            text += """ ------------ ----------------- ----------------- -----------------\n"""
            for i in range(self.nallatom()):
                # compute() yields coordinates in input units; printed without conversion
                geom = self.full_atoms[i].compute()
                text += """ %3s%-7s """ % ("" if self.fZ(i) else "Gh(", self.fsymbol(i) + ("" if self.fZ(i) else ")"))
                for j in range(3):
                    text += """ %17.12f""" % (geom[j])
                text += "\n"
            text += "\n"
        else:
            text += " No atoms in this molecule.\n"
        print(text)
        # TODO outfile
    def print_in_input_format(self):
        """Print the molecule to stdout in the same format the user provided
        (each CoordEntry renders itself), followed by any geometry variables.

        Dummies print as their symbol 'X'; ghosts as ``Gh(symbol)``.
        """
        text = ""
        if self.nallatom():
            text += " Geometry (in %s), charge = %d, multiplicity = %d:\n\n" % \
                ("Angstrom" if self.units() == 'Angstrom' else "Bohr",
                self.molecular_charge(), self.multiplicity())
            for i in range(self.nallatom()):
                # real atoms and dummies keep their symbol; ghosts get the Gh() wrapper
                if self.fZ(i) or self.fsymbol(i) == "X":
                    text += " %-8s" % (self.fsymbol(i))
                else:
                    text += " %-8s" % ("Gh(" + self.fsymbol(i) + ")")
                text += self.full_atoms[i].print_in_input_format()
            text += "\n"
            if len(self.geometry_variables):
                for vb, val in self.geometry_variables.items():
                    text += """ %-10s=%16.10f\n""" % (vb, val)
                text += "\n"
        print(text)
        # TODO outfile
    def everything(self):
        """Quick diagnostic dump of the Molecule's internal state to stdout:
        counts, charge/multiplicity, units, frame flags, symmetry, and
        fragment tallies."""
        text = """ ==> qcdb Molecule %s <==\n\n""" % (self.name())
        text += """ Natom %d\t\tNallatom %d\n""" % (self.natom(), self.nallatom())
        text += """ charge %d\t\tspecified? NA\n""" % (self.molecular_charge())
        text += """ multiplicity %d\t\tspecified? NA\n""" % (self.multiplicity())
        text += """ units %s\tconversion %f\n""" % (self.units(), self.input_units_to_au())
        text += """ DOcom? %s\t\tDONTreorient? %s\n""" % (self.PYmove_to_com, self.orientation_fixed())
        text += """ reinterpret? %s\t\tlock_frame? %s\n""" % (self.PYreinterpret_coordentries, self.lock_frame)
        text += """ input symm %s\n""" % (self.symmetry_from_input())
        text += """ Nfragments %d\t\tNactive %d\n""" % (self.nfragments(), self.nactive_fragments())
        text += """ zmat? %s\n""" % (self.has_zmatrix())
        print(text)
    def create_psi4_string_from_molecule(self, force_c1=False):
        """Regenerate a psi4 input-file molecule specification string from the
        current state: units/frame keywords, per-fragment charge &
        multiplicity, atom lines, and geometry variables.

        Parameters
        ----------
        force_c1 : bool
            When True, emit an explicit ``symmetry c1`` line.
        """
        text = ""
        if self.nallatom():
            # append units and any other non-default molecule keywords
            text += " units %-s\n" % ("Angstrom" if self.units() == 'Angstrom' else "Bohr")
            if not self.PYmove_to_com:
                text += " no_com\n"
            if self.PYfix_orientation:
                text += " no_reorient\n"
            if force_c1:
                text += " symmetry c1\n"
            text += " {} {}\n --\n".format(self.molecular_charge(), self.multiplicity())
            # append atoms and coordentries and fragment separators with charge and multiplicity
            Pfr = 0  # count of fragments actually printed (controls the -- separator)
            for fr in range(self.nfragments()):
                # Absent fragments are skipped entirely unless a zmatrix may reference them.
                if self.fragment_types[fr] == 'Absent' and not self.has_zmatrix():
                    continue
                # Ghost/Absent fragments get a commented-out charge/multiplicity line.
                text += "%s %s%d %d\n" % (
                    "" if Pfr == 0 else " --\n",
                    "#" if self.fragment_types[fr] == 'Ghost' or self.fragment_types[fr] == 'Absent' else "",
                    self.fragment_charges[fr], self.fragment_multiplicities[fr])
                Pfr += 1
                for at in range(self.fragments[fr][0], self.fragments[fr][1] + 1):
                    if self.fragment_types[fr] == 'Absent':
                        text += " %-8s" % ("X")
                    elif self.fZ(at) or self.fsymbol(at) == "X":
                        text += " %-8s" % (self.flabel(at))
                    else:
                        text += " %-8s" % ("Gh(" + self.flabel(at) + ")")
                    text += " %s" % (self.full_atoms[at].print_in_input_format())
            text += "\n"
            # append any coordinate variables
            if len(self.geometry_variables):
                for vb, val in self.geometry_variables.items():
                    text += """ %-10s=%16.10f\n""" % (vb, val)
                text += "\n"
        return text
# <<< Involved Methods for Coordinates >>>
    def get_coord_value(self, vstr):
        """Interpret string *vstr* as either a literal number (NumberValue) or
        a (possibly negated) geometry-variable reference (VariableValue).

        Variables need not be defined yet; their names are recorded in
        ``self.all_variables`` for later resolution.
        """
        vstr = vstr.upper()
        realNumber = re.compile(r"""[-+]?(?:(?:\d*\.\d+)|(?:\d+\.?))(?:[Ee][+-]?\d+)?""", re.VERBOSE)
        # handle number values
        if realNumber.match(vstr):
            return NumberValue(float(vstr))
        # handle variable values, whether defined or not
        else:
            # TDA is a predefined constant (tetrahedral angle, ~109.47 deg);
            # deliberately falls through so a VariableValue is still returned.
            if vstr == 'TDA':
                self.geometry_variables[vstr] = 360.0 * math.atan(math.sqrt(2)) / math.pi
            # handle negative variable values (ignore leading '-' and return minus the value)
            if vstr[0] == '-':
                self.all_variables.append(vstr[1:])
                return VariableValue(vstr[1:], self.geometry_variables, True)
            # handle normal variable values
            else:
                self.all_variables.append(vstr)
                return VariableValue(vstr, self.geometry_variables)
def add_atom(self, Z, x, y, z, symbol, mass=0.0, charge=0.0, label='', A=-1, lineno=-1):
"""Add an atom to the molecule
*Z* atomic number
*x* cartesian coordinate
*y* cartesian coordinate
*z* cartesian coordinate
*symbol* atomic symbol to use
*mass* mass to use if non standard
*charge* charge to use if non standard
*label* extended symbol with user info
*A* mass number
*lineno* line number when taken from a string
"""
self.lock_frame = False
self.set_has_cartesian(True)
if label == '':
label = symbol
#if self.atom_at_position([x, y, z]) == -1:
if True:
# Dummies go to full_atoms, ghosts need to go to both.
self.full_atoms.append(CartesianEntry(self.nallatom(), Z, charge, mass, symbol, label, A,
NumberValue(x), NumberValue(y), NumberValue(z)))
if label.upper() != 'X':
self.atoms.append(self.full_atoms[-1])
else:
raise ValidationError("Molecule::add_atom: Adding atom on top of an existing atom.")
    # For use with atoms defined with ZMAT or variable values, i.e., not Cartesian and NumberValue
    def add_unsettled_atom(self, Z, anchor, symbol, mass=0.0, charge=0.0, label='', A=-1):
        """Append an atom whose position is given symbolically, dispatching on
        ``len(anchor)``: 3 -> Cartesian (possibly variable) coordinates;
        0/2/4/6 -> first/second/third/later Z-matrix lines. Coordinate values
        referencing a dummy ('X') anchor are marked fixed.

        Raises
        ------
        ValidationError
            For anchor lengths other than 0, 2, 3, 4, 6, or for bad/duplicate
            anchor atoms.
        """
        self.lock_frame = False
        numEntries = len(anchor)
        currentAtom = len(self.full_atoms)
        # handle cartesians
        if numEntries == 3:
            self.set_has_cartesian(True)
            xval = self.get_coord_value(anchor[0])
            yval = self.get_coord_value(anchor[1])
            zval = self.get_coord_value(anchor[2])
            self.full_atoms.append(CartesianEntry(currentAtom, Z, charge,
                mass, symbol, label, A,
                xval, yval, zval))
        # handle first line of Zmat
        elif numEntries == 0:
            self.set_has_zmatrix(True)
            self.full_atoms.append(ZMatrixEntry(currentAtom, Z, charge,
                mass, symbol, label, A))
        # handle second line of Zmat
        elif numEntries == 2:
            self.set_has_zmatrix(True)
            rTo = self.get_anchor_atom(anchor[0], '')
            # anchors must already exist (index strictly below the new atom)
            if rTo >= currentAtom:
                raise ValidationError("Error finding defined anchor atom {}".format(anchor[0]))
            rval = self.get_coord_value(anchor[1])
            if self.full_atoms[rTo].symbol() == 'X':
                rval.set_fixed(True)
            self.full_atoms.append(ZMatrixEntry(currentAtom, Z, charge,
                mass, symbol, label, A,
                self.full_atoms[rTo], rval))
        # handle third line of Zmat
        elif numEntries == 4:
            self.set_has_zmatrix(True)
            rTo = self.get_anchor_atom(anchor[0], '')
            if rTo >= currentAtom:
                raise ValidationError("Error finding defined anchor atom {}".format(anchor[0]))
            aTo = self.get_anchor_atom(anchor[2], '')
            if aTo >= currentAtom:
                raise ValidationError("Error finding defined anchor atom {}".format(anchor[2]))
            if aTo == rTo:
                raise ValidationError("Error: atom used multiple times")
            rval = self.get_coord_value(anchor[1])
            aval = self.get_coord_value(anchor[3])
            if self.full_atoms[rTo].symbol() == 'X':
                rval.set_fixed(True)
            if self.full_atoms[aTo].symbol() == 'X':
                aval.set_fixed(True)
            self.full_atoms.append(ZMatrixEntry(currentAtom, Z, charge,
                mass, symbol, label, A,
                self.full_atoms[rTo], rval,
                self.full_atoms[aTo], aval))
        # handle fourth line of Zmat
        elif numEntries == 6:
            self.set_has_zmatrix(True)
            rTo = self.get_anchor_atom(anchor[0], '')
            if rTo >= currentAtom:
                raise ValidationError("Error finding defined anchor atom {}".format(anchor[0]))
            aTo = self.get_anchor_atom(anchor[2], '')
            if aTo >= currentAtom:
                raise ValidationError("Error finding defined anchor atom {}".format(anchor[2]))
            dTo = self.get_anchor_atom(anchor[4], '')
            if dTo >= currentAtom:
                raise ValidationError("Error finding defined anchor atom {}".format(anchor[4]))
            if aTo == rTo or rTo == dTo or aTo == dTo:  # for you star wars fans
                raise ValidationError("Error: atom used multiple times")
            rval = self.get_coord_value(anchor[1])
            aval = self.get_coord_value(anchor[3])
            dval = self.get_coord_value(anchor[5])
            if self.full_atoms[rTo].symbol() == 'X':
                rval.set_fixed(True)
            if self.full_atoms[aTo].symbol() == 'X':
                aval.set_fixed(True)
            if self.full_atoms[dTo].symbol() == 'X':
                dval.set_fixed(True)
            self.full_atoms.append(ZMatrixEntry(currentAtom, Z, charge,
                mass, symbol, label, A,
                self.full_atoms[rTo], rval,
                self.full_atoms[aTo], aval,
                self.full_atoms[dTo], dval))
        else:
            raise ValidationError('Illegal geometry specification (neither Cartesian nor Z-Matrix)')
def atom_entry(self, atom):
"""Returns the CoordEntry for an atom."""
return self.atoms[atom]
def atom_at_position(self, b, tol=0.05):
"""Tests to see if an atom is at the passed position *b* in Bohr with a tolerance *tol*.
>>> print(H2OH2O.atom_at_position([1.35*(1.0/psi_bohr2angstroms), 0.10*(1.0/psi_bohr2angstroms), 0.0*(1.0/psi_bohr2angstroms)]))
3
"""
if len(b) != 3:
raise ValidationError('Molecule::atom_at_position: Argument vector not of length 3\n')
if self.natom() == 0:
return -1
if self.wholegeom is not None:
current_geom = self.wholegeom
else:
current_geom = self.geometry(np_out=True)
shifted_geom = current_geom - np.asarray(b)
dist2 = np.sum(np.square(shifted_geom), axis=1)
distminidx = np.argmin(dist2)
if dist2[distminidx] < tol * tol:
return distminidx
else:
return -1
def is_variable(self, vstr):
"""Checks to see if the variable str is in the list, returns
true if it is, and returns false if not.
>>> H2OH2O.is_variable('R')
False
"""
return True if vstr.upper() in self.all_variables else False
def get_variable(self, vstr):
"""Checks to see if the variable str is in the list, sets it to
val and returns true if it is, and returns false if not.
"""
vstr = vstr.upper()
try:
return self.geometry_variables[vstr]
except KeyError:
raise ValidationError('Molecule::get_variable: Geometry variable %s not known.\n' % (vstr))
    def set_variable(self, vstr, val):
        """Assign *val* to geometry variable *vstr* (stored upper-case) and
        immediately rebuild the geometry via update_geometry().

        If the rebuild fails because not all atoms are defined yet, the atom
        list is cleared so no partially constructed atoms survive.
        """
        self.lock_frame = False
        self.geometry_variables[vstr.upper()] = val
        print("""Setting geometry variable %s to %f""" % (vstr.upper(), val))
        try:
            self.update_geometry()
        except IncompleteAtomError:
            # Update geometry might have added some atoms, delete them to be safe.
            self.atoms = []
        # TODO outfile
def set_geometry_variable(self, vstr, val):
"""Plain assigns the vlue val to the variable labeled string in the list of geometry variables."""
self.geometry_variables[vstr.upper()] = val
def get_anchor_atom(self, vstr, line):
"""Attempts to interpret a string *vstr* as an atom specifier in
a zmatrix. Takes the current *line* for error message printing.
Returns the atom number (adjusted to zero-based counting).
"""
integerNumber = re.compile(r"(-?\d+)", re.IGNORECASE)
if integerNumber.match(vstr):
# This is just a number, return it
return int(vstr) - 1
else:
# Look to see if this string is known
for i in range(self.nallatom()):
if self.full_atoms[i].label() == vstr:
return i
raise ValidationError("Molecule::get_anchor_atom: Illegal value %s in atom specification on line %s.\n" % (vstr, line))
def geometry(self, np_out=False):
"""Returns the geometry in Bohr as a N X 3 array.
>>> print(H2OH2O.geometry())
[[-2.930978460188563, -0.21641143673806384, 0.0], [-3.655219780069251, 1.4409218455037016, 0.0], [-1.1332252981904638, 0.0769345303220403, 0.0], [2.5523113582286716, 0.21064588230662976, 0.0], [3.175492014248769, -0.7062681346308132, -1.4334725450878665], [3.175492014248769, -0.7062681346308132, 1.4334725450878665]]
"""
geom = np.asarray([self.atoms[at].compute() for at in range(self.natom())])
geom *= self.input_units_to_au()
if np_out:
return geom
else:
return geom.tolist()
def full_geometry(self, np_out=False):
"""Returns the full (dummies included) geometry in Bohr as a N X 3 array.
>>> print(H2OH2O.full_geometry())
[[-2.930978460188563, -0.21641143673806384, 0.0], [-3.655219780069251, 1.4409218455037016, 0.0], [-1.1332252981904638, 0.0769345303220403, 0.0], [0.0, 0.0, 0.0], [2.5523113582286716, 0.21064588230662976, 0.0], [3.175492014248769, -0.7062681346308132, -1.4334725450878665], [3.175492014248769, -0.7062681346308132, 1.4334725450878665]]
"""
geom = np.asarray([self.full_atoms[at].compute() for at in range(self.nallatom())])
geom *= self.input_units_to_au()
if np_out:
return geom
else:
return geom.tolist()
def set_geometry(self, geom):
"""Sets the geometry, given a N X 3 array of coordinates *geom* in Bohr.
>>> H2OH2O.set_geometry([[1,2,3],[4,5,6],[7,8,9],[-1,-2,-3],[-4,-5,-6],[-7,-8,-9]])
"""
self.lock_frame = False
for at in range(self.natom()):
self.atoms[at].set_coordinates(geom[at][0] / self.input_units_to_au(),
geom[at][1] / self.input_units_to_au(),
geom[at][2] / self.input_units_to_au())
def set_full_geometry(self, geom):
"""Sets the full geometry (dummies included), given a N X 3 array of coordinates *geom* in Bohr.
>>> H2OH2O.set_full geometry([[1,2,3],[4,5,6],[7,8,9],[0,0,0],[-1,-2,-3],[-4,-5,-6],[-7,-8,-9]])
"""
self.lock_frame = False
for at in range(self.nallatom()):
self.full_atoms[at].set_coordinates(geom[at][0] / self.input_units_to_au(),
geom[at][1] / self.input_units_to_au(),
geom[at][2] / self.input_units_to_au())
def distance_matrix(self):
"""Computes a matrix depicting distances between atoms. Prints
formatted and returns array.
>>> H2OH2O.distance_matrix()
Interatomic Distances (Angstroms)
[1] [2] [3] [4] [5] [6]
[1] 0.00000
[2] 0.95711 0.00000
[3] 0.96391 1.51726 0.00000
[4] 2.91042 3.34878 1.95159 0.00000
[5] 3.32935 3.86422 2.43843 0.95895 0.00000
[6] 3.32935 3.86422 2.43843 0.95895 1.51712 0.00000
"""
distm = qcel.util.distance_matrix(self.geometry(np_out=True), self.geometry(np_out=True))
distm *= qcel.constants.bohr2angstroms
text = " Interatomic Distances (Angstroms)\n\n "
for i in range(self.natom()):
text += '%11s ' % ('[' + str(i + 1) + ']')
text += "\n"
for i in range(self.natom()):
text += ' %-8s ' % ('[' + str(i + 1) + ']')
for j in range(self.natom()):
if j > i:
continue
else:
text += '%10.5f ' % (distm(i, j))
text += "\n"
text += "\n\n"
print(text)
return distm
# TODO outfile
def print_distances(self):
"""Print the geometrical parameters (distances) of the molecule.
suspect libmints version actually prints Bohr.
>>> print(H2OH2O.print_distances())
Interatomic Distances (Angstroms)
Distance 1 to 2 0.957
Distance 1 to 3 0.964
Distance 1 to 4 2.910
...
"""
text = " Interatomic Distances (Angstroms)\n\n"
for i in range(self.natom()):
for j in range(i + 1, self.natom()):
eij = sub(self.xyz(j), self.xyz(i))
dist = norm(eij) * qcel.constants.bohr2angstroms
text += " Distance %d to %d %-8.3lf\n" % (i + 1, j + 1, dist)
text += "\n\n"
return text
# TODO outfile
    def print_bond_angles(self):
        """Return a formatted table of all i-j-k bond angles (degrees),
        with j the central atom; enumerates every ordered center j and
        unordered pair (i, k)."""
        text = " Bond Angles (degrees)\n\n"
        for j in range(self.natom()):
            for i in range(self.natom()):
                if j == i:
                    continue
                for k in range(i + 1, self.natom()):
                    if j == k:
                        continue
                    # unit vectors from center j toward i and k
                    eji = sub(self.xyz(i), self.xyz(j))
                    eji = normalize(eji)
                    ejk = sub(self.xyz(k), self.xyz(j))
                    ejk = normalize(ejk)
                    dotproduct = dot(eji, ejk)
                    # angle between the two bond vectors, in degrees
                    phi = 180.0 * math.acos(dotproduct) / math.pi
                    text += " Angle %d-%d-%d: %8.3lf\n" % (i + 1, j + 1, k + 1, phi)
        text += "\n\n"
        return text
        # TODO outfile
    def print_dihedrals(self):
        """Return a formatted table of all i-j-k-l dihedral angles (degrees).

        Enumerates every ordered quadruple of distinct atoms, so each
        dihedral appears multiple times (once per ordering).
        """
        text = " Dihedral Angles (Degrees)\n\n"
        for i in range(self.natom()):
            for j in range(self.natom()):
                if i == j:
                    continue
                for k in range(self.natom()):
                    if i == k or j == k:
                        continue
                    for l in range(self.natom()):
                        if i == l or j == l or k == l:
                            continue
                        # unit bond vectors along the i-j-k-l chain
                        eij = sub(self.xyz(j), self.xyz(i))
                        eij = normalize(eij)
                        ejk = sub(self.xyz(k), self.xyz(j))
                        ejk = normalize(ejk)
                        ekl = sub(self.xyz(l), self.xyz(k))
                        ekl = normalize(ekl)
                        # Compute angle ijk
                        angleijk = math.acos(dot(scale(eij, -1.0), ejk))
                        # Compute angle jkl
                        anglejkl = math.acos(dot(scale(ejk, -1.0), ekl))
                        # compute term1 (eij x ejk)
                        term1 = cross(eij, ejk)
                        # compute term2 (ejk x ekl)
                        term2 = cross(ejk, ekl)
                        numerator = dot(term1, term2)
                        denominator = math.sin(angleijk) * math.sin(anglejkl)
                        # collinear chains give a zero denominator; treat as 90 deg
                        try:
                            costau = numerator / denominator
                        except ZeroDivisionError:
                            costau = 0.0
                        # clamp tiny float overshoot so acos stays in domain
                        if costau > 1.00 and costau < 1.000001:
                            costau = 1.00
                        if costau < -1.00 and costau > -1.000001:
                            costau = -1.00
                        tau = 180.0 * math.acos(costau) / math.pi
                        text += " Dihedral %d-%d-%d-%d: %8.3lf\n" % (i + 1, j + 1, k + 1, l + 1, tau)
        text += "\n\n"
        return text
        # TODO outfile
    def print_out_of_planes(self):
        """Return a formatted table of all i-j-k-l out-of-plane angles
        (degrees): the angle of the l->i vector out of the j-k-l plane.

        Enumerates every ordered quadruple of distinct atoms, so each
        combination appears multiple times (once per ordering).
        """
        text = " Out-Of-Plane Angles (Degrees)\n\n"
        for i in range(self.natom()):
            for j in range(self.natom()):
                if i == j:
                    continue
                for k in range(self.natom()):
                    if i == k or j == k:
                        continue
                    for l in range(self.natom()):
                        if i == l or j == l or k == l:
                            continue
                        # Compute vectors we need first
                        elj = sub(self.xyz(j), self.xyz(l))
                        elj = normalize(elj)
                        elk = sub(self.xyz(k), self.xyz(l))
                        elk = normalize(elk)
                        eli = sub(self.xyz(i), self.xyz(l))
                        eli = normalize(eli)
                        # Denominator
                        denominator = math.sin(math.acos(dot(elj, elk)))
                        # Numerator
                        eljxelk = cross(elj, elk)
                        numerator = dot(eljxelk, eli)
                        # compute angle
                        # collinear j-k-l gives a zero denominator; treat as 0 deg
                        try:
                            sinetheta = numerator / denominator
                        except ZeroDivisionError:
                            sinetheta = 0.0
                        # clamp float overshoot so asin stays in domain
                        if sinetheta > 1.00:
                            sinetheta = 1.000
                        if sinetheta < -1.00:
                            sinetheta = -1.000
                        theta = 180.0 * math.asin(sinetheta) / math.pi
                        text += " Out-of-plane %d-%d-%d-%d: %8.3lf\n" % (i + 1, j + 1, k + 1, l + 1, theta)
        text += "\n\n"
        return text
        # TODO outfile
def reinterpret_coordentry(self, rc):
"""Do we reinterpret coordentries during a call to update_geometry?
(method name in libmints is set_reinterpret_coordentry)
"""
self.PYreinterpret_coordentries = rc
    def reinterpret_coordentries(self):
        """Rebuild ``self.atoms`` from the active fragments of
        ``self.full_atoms``, marking ghosts and recomputing the molecular
        charge and multiplicity from the per-fragment values."""
        self.atoms = []
        # drop any cached coordinates before recomputing
        for item in self.full_atoms:
            item.invalidate()
        # stash user-specified totals; they win for single-fragment molecules
        temp_charge = self.PYmolecular_charge
        temp_multiplicity = self.PYmultiplicity
        self.PYmolecular_charge = 0
        high_spin_multiplicity = 1
        for fr in range(self.nfragments()):
            if self.fragment_types[fr] == 'Absent':
                continue
            # only Real fragments contribute to total charge and spin
            if self.fragment_types[fr] == 'Real':
                self.PYmolecular_charge += self.fragment_charges[fr]
                high_spin_multiplicity += self.fragment_multiplicities[fr] - 1
            for at in range(self.fragments[fr][0], self.fragments[fr][1] + 1):
                self.full_atoms[at].compute()
                self.full_atoms[at].set_ghosted(self.fragment_types[fr] == 'Ghost')
                # dummies ('X') stay out of the real-atom list
                if self.full_atoms[at].symbol() != 'X':
                    self.atoms.append(self.full_atoms[at])
        # TODO: This is a hack to ensure that set_multiplicity and set_molecular_charge
        # work for single-fragment molecules.
        if self.nfragments() < 2:
            self.PYmolecular_charge = temp_charge
            self.PYmultiplicity = temp_multiplicity
        else:
            if (self.fragment_types.count('Real') == len(self.fragments)) and ((temp_multiplicity % 2) == (high_spin_multiplicity % 2)):
                # give low-spin a chance, so long as ghost/absent fragments can't be complicating the picture
                self.PYmultiplicity = temp_multiplicity
            else:
                self.PYmultiplicity = high_spin_multiplicity
def update_geometry(self):
    """Updates the geometry, by (re)interpreting the string used to
    create the molecule, and the current values of the variables.
    The atoms list is cleared, and then rebuilt by this routine.
    This function must be called after first instantiation of Molecule.

    Steps (order matters): reinterpret coordinate entries, move to the
    center of mass, orient into the symmetry frame (unless fixed),
    detect the point group, and symmetrize. ``self.wholegeom`` caches
    the current geometry between steps and is cleared at the end.

    >>> H2 = qcdb.Molecule("H\\nH 1 0.74\\n")
    >>> print(H2.natom())
    0
    >>> H2.update_geometry()
    >>> print(H2.natom())
    2
    """
    if self.nallatom() == 0:
        print("Warning: There are no quantum mechanical atoms in this molecule.")
    # Idempotence condition: once the frame is locked, do nothing until
    # something unlocks it (e.g. geometry changes).
    if self.lock_frame:
        return
    #print("beginning update_geometry:")
    #self.print_full()
    if self.PYreinterpret_coordentries:
        self.reinterpret_coordentries()
    #print("after reinterpret_coordentries:")
    #self.print_full()
    if self.PYmove_to_com:
        self.move_to_com()
    #print("after com:")
    #self.print_full()
    # Cache the geometry so downstream symmetry detection reuses it.
    self.wholegeom = self.geometry(np_out=True)
    # If the no_reorient command was given, don't reorient
    if not self.PYfix_orientation:
        # Now we need to rotate the geometry to its symmetry frame
        # to align the axes correctly for the point group
        # symmetry_frame looks for the highest point group so that we can align
        # the molecule according to its actual symmetry, rather than the symmetry
        # the the user might have provided.
        frame = self.symmetry_frame()
        self.rotate_full(frame)
        #print("after rotate:")
        #self.print_full()
        # Refresh the cache after reorientation.
        self.wholegeom = self.geometry(np_out=True)
    # Recompute point group of the molecule, so the symmetry info is updated to the new frame
    self.set_point_group(self.find_point_group())
    self.set_full_point_group()
    self.wholegeom = self.geometry(np_out=True)
    # Disabling symmetrize for now if orientation is fixed, as it is not
    # correct. We may want to fix this in the future, but in some cases of
    # finite-differences the set geometry is not totally symmetric anyway.
    # Symmetrize the molecule to remove any noise
    self.symmetrize()
    #print("after symmetry:")
    #self.print_full()
    # Invalidate the cache and lock the frame until the next change.
    self.wholegeom = None
    self.lock_frame = True
# <<< Methods for Miscellaneous >>>
def clear(self):
    """Reset the molecule to an empty, unlocked state."""
    self.atoms = []
    self.full_atoms = []
    self.lock_frame = False
def nuclear_repulsion_energy(self):
    """Computes nuclear repulsion energy (hartree).

    Sums Z_i * Z_j / r_ij over unique atom pairs. The original scanned
    all N^2 pairs and filtered with ``at2 < at1``; iterating the inner
    loop over ``range(at1)`` visits exactly the unique pairs, and the
    charge of atom ``at1`` is hoisted out of the inner loop.

    >>> print(H2OH2O.nuclear_repulsion_energy())
    36.6628478528
    """
    e = 0.0
    for at1 in range(self.natom()):
        Zi = self.Z(at1)
        for at2 in range(at1):
            dist = distance(self.xyz(at1), self.xyz(at2))
            e += Zi * self.Z(at2) / dist
    return e
def nuclear_repulsion_energy_deriv1(self):
    """Computes the gradient of the nuclear repulsion energy: one
    [dE/dx, dE/dy, dE/dz] triple per atom, returned as a list of lists.

    >>> print(H2OH2O.nuclear_repulsion_energy_deriv1())
    [[3.9020946901323774, 2.76201566471991, 0.0], [1.3172905807089021, -2.3486366050337293, 0.0], [-1.8107598525022435, -0.32511212499256564, 0.0], [-1.217656141385739, -2.6120090867576717, 0.0], [-1.0954846384766488, 1.2618710760320282, 2.1130743287465603], [-1.0954846384766488, 1.2618710760320282, -2.1130743287465603]]
    """
    de = []
    for i in range(self.natom()):
        gx = 0.0
        gy = 0.0
        gz = 0.0
        for j in range(self.natom()):
            if i == j:
                continue
            # 1/r^3 factor from differentiating Zi*Zj/r
            r3 = distance(self.xyz(i), self.xyz(j)) ** 3.0
            zz = self.Z(i) * self.Z(j)
            gx -= (self.x(i) - self.x(j)) * zz / r3
            gy -= (self.y(i) - self.y(j)) * zz / r3
            gz -= (self.z(i) - self.z(j)) * zz / r3
        de.append([gx, gy, gz])
    return de
def nuclear_repulsion_energy_deriv2(self):
    """ **NYI** Second derivatives of the nuclear repulsion energy."""
    raise FeatureNotImplemented('Molecule::nuclear_repulsion_energy_deriv2')  # FINAL
def set_basis_all_atoms(self, name, role="BASIS"):
    """Tag every atom (real, ghost, and dummy) with basis *name* for *role*."""
    for entry in self.full_atoms:
        entry.set_basisset(name, role)
def set_basis_by_symbol(self, symbol, name, role="BASIS"):
    """Tag every atom whose element symbol matches *symbol* with basis *name*."""
    target = symbol.upper()
    for entry in self.full_atoms:
        if entry.symbol() == target:
            entry.set_basisset(name, role)
def clear_basis_all_atoms(self):
    """Strip every basis-set assignment from every atom."""
    for entry in self.full_atoms:
        entry.PYbasissets = collections.OrderedDict()
def set_basis_by_number(self, number, name, role="BASIS"):
    """Assign basis *name* to atom *number* (0-indexed, dummies excluded)."""
    # 0-indexed over real/ghost atoms only, unlike libmints (>= is an error there too)
    if number >= self.natom():
        raise ValidationError("Molecule::set_basis_by_number: Basis specified for atom %d, but there are only %d atoms in this molecule." % \
            (number, self.natom()))
    self.atoms[number].set_basisset(name, role)
def set_basis_by_label(self, label, name, role="BASIS"):
    """Tag every atom whose label matches *label* with basis *name*."""
    target = label.upper()
    for entry in self.full_atoms:
        if entry.label() == target:
            entry.set_basisset(name, role)
def set_shell_by_number(self, number, bshash, role="BASIS"):
    """Assign BasisSet *bshash* to atom *number* (0-indexed, dummies excluded)."""
    # Changing shells invalidates the locked symmetry frame.
    self.lock_frame = False
    if number >= self.natom():
        raise ValidationError("Molecule::set_shell_by_number: Basis specified for atom %d, but there are only %d atoms in this molecule." % \
            (number, self.natom()))
    self.atoms[number].set_shell(bshash, role)
def nfrozen_core(self, depth=False):
    """Number of frozen core electrons for this molecule.

    Parameters
    ----------
    depth : bool or str
        ``False``/'false' freezes nothing; ``True``/'true' freezes the
        electrons of the nearest previous noble-gas configuration of
        each atom.

    Returns
    -------
    int

    Raises
    ------
    ValidationError
        If *depth* is unrecognized or an atomic number exceeds 108.

    >>> print(H2OH2O.nfrozen_core())
    2
    """
    # Guard .upper() with isinstance: the original evaluated depth.upper()
    # whenever depth != False, so calling with the bool True raised
    # AttributeError and the documented True option was unusable.
    if depth == False or (isinstance(depth, str) and depth.upper() == 'FALSE'):
        return 0
    elif depth == True or (isinstance(depth, str) and depth.upper() == 'TRUE'):
        # Freeze the number of core electrons corresponding to the
        # nearest previous noble gas atom. This means that the 4p block
        # will still have 3d electrons active. Alkali earth atoms will
        # have one valence electron in this scheme.
        nfzc = 0
        for A in range(self.natom()):
            if self.Z(A) > 2:
                nfzc += 1
            if self.Z(A) > 10:
                nfzc += 4
            if self.Z(A) > 18:
                nfzc += 4
            if self.Z(A) > 36:
                nfzc += 9
            if self.Z(A) > 54:
                nfzc += 9
            if self.Z(A) > 86:
                nfzc += 16
            if self.Z(A) > 108:
                raise ValidationError("Molecule::nfrozen_core: Invalid atomic number")
        return nfzc
    else:
        raise ValidationError("Molecule::nfrozen_core: Frozen core '%s' is not supported, options are {true, false}." % (depth))
# <<< Involved Methods for Frame >>>
def translate(self, r):
    """Shift every atom (including dummies) by displacement *r*.

    >>> H2OH2O.translate([1.0, 1.0, 0.0])
    """
    # Work in bohr, shift, then convert back to the input units.
    conv = self.input_units_to_au()
    for at in range(self.nallatom()):
        shifted = scale(self.full_atoms[at].compute(), conv)
        shifted = add(shifted, r)
        shifted = scale(shifted, 1.0 / conv)
        self.full_atoms[at].set_coordinates(shifted[0], shifted[1], shifted[2])
def center_of_mass(self):
    """Computes center of mass of molecule (does not translate molecule).

    >>> H2OH2O.center_of_mass()
    [-0.12442647346606871, 0.00038657002584110707, 0.0]
    """
    com = [0.0, 0.0, 0.0]
    total_mass = 0.0
    for at in range(self.natom()):
        m = self.mass(at)
        com = add(com, scale(self.xyz(at), m))
        total_mass += m
    return scale(com, 1.0 / total_mass)
def move_to_com(self):
    """Translate the molecule so its center of mass sits at the origin."""
    self.translate(scale(self.center_of_mass(), -1.0))
def set_com_fixed(self, _fix=True):
    """ **NYI** Fix the center of mass at its current frame.
    Not used in libmints so not implemented.
    """
    raise FeatureNotImplemented('Molecule::set_com_fixed')  # FINAL
# def inertia_tensor(self):
# """Compute inertia tensor.
#
# >>> print(H2OH2O.inertia_tensor())
# [[8.704574864178731, -8.828375721817082, 0.0], [-8.828375721817082, 280.82861714077666, 0.0], [0.0, 0.0, 281.249500988553]]
#
# """
# tensor = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
#
# for i in range(self.natom()):
# # I(alpha, alpha)
# tensor[0][0] += self.mass(i) * (self.y(i) * self.y(i) + self.z(i) * self.z(i))
# tensor[1][1] += self.mass(i) * (self.x(i) * self.x(i) + self.z(i) * self.z(i))
# tensor[2][2] += self.mass(i) * (self.x(i) * self.x(i) + self.y(i) * self.y(i))
#
# # I(alpha, beta)
# tensor[0][1] -= self.mass(i) * self.x(i) * self.y(i)
# tensor[0][2] -= self.mass(i) * self.x(i) * self.z(i)
# tensor[1][2] -= self.mass(i) * self.y(i) * self.z(i)
#
# # mirror
# tensor[1][0] = tensor[0][1]
# tensor[2][0] = tensor[0][2]
# tensor[2][1] = tensor[1][2]
#
# # Check the elements for zero and make them a hard zero.
# for i in range(3):
# for j in range(3):
# if math.fabs(tensor[i][j]) < ZERO:
# tensor[i][j] = 0.0
# return tensor
def rotational_constants(self, tol=FULL_PG_TOL, return_units='cm^-1'):
    """Compute the rotational constants and moments of inertia.

    Parameters
    ----------
    return_units : str, optional
        Selector for rotational constants (among: 'GHz', 'MHz', 'cm^-1') or
        moments of inertia (among: 'u a0^2', 'u A^2').

    Returns
    -------
    np.array
        1 by 3 of rotational constants or moments of inertia in units of `return_units`.

    Notes
    -----
    This used to return a list with inf values as None.
    """
    # Principal moments: eigenvalues of the inertia tensor, ascending.
    evals, evecs = diagonalize3x3symmat(self.inertia_tensor())
    evals = sorted(evals)
    evals = np.asarray(evals)
    # Unit-conversion factors from moments in u*a0^2.
    im_amuA = qcel.constants.bohr2angstroms * qcel.constants.bohr2angstroms
    im_ghz = qcel.constants.h * qcel.constants.na * 1e14 / (8 * math.pi * math.pi * qcel.constants.bohr2angstroms * qcel.constants.bohr2angstroms)
    im_mhz = im_ghz * 1000.
    im_cm = im_ghz * 1.e7 / qcel.constants.c
    rc_moi = {}
    rc_moi['u a0^2'] = evals
    rc_moi['u A^2'] = evals * im_amuA
    # Rotational constants are inversely proportional to the moments; a
    # zero moment (atom, linear molecule) divides to inf, so silence the
    # numpy divide warning rather than special-case it.
    with np.errstate(divide='ignore'):
        rc_moi['GHz'] = im_ghz / evals
        rc_moi['MHz'] = im_mhz / evals
        rc_moi['cm^-1'] = im_cm / evals
    fmt = """  {:12}    {a:3} {:16.8f}    {b:3} {:16.8f}    {c:3} {:16.8f}\n"""
    text = "        Moments of Inertia and Rotational Constants\n\n"
    text += fmt.format('[u a0^2]', a='I_A', b='I_B', c='I_C', *rc_moi['u a0^2'])
    text += fmt.format('[u A^2]', a='I_A', b='I_B', c='I_C', *rc_moi['u A^2'])
    text += fmt.format('[GHz]', a='A', b='B', c='C', *rc_moi['GHz'])
    text += fmt.format('[MHz]', a='A', b='B', c='C', *rc_moi['MHz'])
    text += fmt.format('[cm^-1]', a='A', b='B', c='C', *rc_moi['cm^-1'])
    print(text)
    # TODO outfile
    return rc_moi[return_units]
def rotor_type(self, tol=FULL_PG_TOL):
    """Returns the rotor type.

    Classifies from the degeneracy pattern of the rotational constants:
    atom, linear, spherical top, symmetric top, or asymmetric top.

    >>> H2OH2O.rotor_type()
    RT_ASYMMETRIC_TOP
    """
    rot_const = self.rotational_constants()
    # NOTE(review): rotational_constants now returns an np.array with inf
    # (not None) for zero moments — see its Notes. This None-to-zero pass
    # looks like a holdover from the old convention, and the
    # `rot_const[0] == 0.0` linear test below may never fire with inf
    # values — TODO confirm linear molecules are still classified RT_LINEAR.
    for i in range(3):
        if rot_const[i] is None:
            rot_const[i] = 0.0
    # Determine degeneracy of rotational constants.
    degen = 0
    for i in range(2):
        for j in range(i + 1, 3):
            if degen >= 2:
                continue
            rabs = math.fabs(rot_const[i] - rot_const[j])
            tmp = rot_const[i] if rot_const[i] > rot_const[j] else rot_const[j]
            if rabs > ZERO:
                rel = rabs / tmp
            else:
                rel = 0.0
            # Count a degenerate pair when the relative difference is small.
            if rel < tol:
                degen += 1
    #print("\tDegeneracy is %d\n" % (degen))
    # Determine rotor type
    if self.natom() == 1:
        rotor_type = 'RT_ATOM'
    elif rot_const[0] == 0.0:
        rotor_type = 'RT_LINEAR'          # 0  <  IB == IC      inf > B == C
    elif degen == 2:
        rotor_type = 'RT_SPHERICAL_TOP'   # IA == IB == IC       A == B == C
    elif degen == 1:
        rotor_type = 'RT_SYMMETRIC_TOP'   # IA <  IB == IC       A >  B == C --or--
                                          # IA == IB <  IC       A == B >  C
    else:
        rotor_type = 'RT_ASYMMETRIC_TOP'  # IA <  IB <  IC       A >  B >  C
    return rotor_type
def rotate(self, R):
    """Rotates the molecule using rotation matrix *R*.

    >>> H2OH2O.rotate([[0,-1,0],[-1,0,0],[0,0,1]])
    """
    # The original pre-allocated a zero matrix that was immediately
    # overwritten by mult(); the dead store is dropped.
    self.set_geometry(mult(self.geometry(), R))
def rotate_full(self, R):
    """Rotates the full molecule (including dummies) using rotation matrix *R*.

    >>> H2OH2O.rotate_full([[0,-1,0],[-1,0,0],[0,0,1]])
    """
    # The original pre-allocated a zero matrix that was immediately
    # overwritten by mult(); the dead store is dropped.
    self.set_full_geometry(mult(self.full_geometry(), R))
def com_fixed(self):
    """Get whether or not center of mass is fixed.

    >>> H2OH2O.com_fixed()
    True
    """
    if self.PYmove_to_com:
        return False
    return True
def fix_com(self, _fix=True):
    """Whether to fix the Cartesian position (True) in its current frame or
    to translate to the C.O.M. (False).
    (method name in libmints is set_com_fixed)
    """
    self.PYmove_to_com = False if _fix else True
def orientation_fixed(self):
    """Get whether or not orientation is fixed.

    >>> H2OH2O.orientation_fixed()
    True
    """
    return self.PYfix_orientation
def fix_orientation(self, _fix=True):
    """Fix the orientation at its current frame
    (method name in libmints is set_orientation_fixed)

    When fixing, the atom list is rebuilt immediately so the current
    Cartesian coordinates are the ones that stay fixed.
    """
    if _fix:
        self.PYfix_orientation = True  # tells update_geometry() not to change orientation
        # Compute original cartesian coordinates - code copied from update_geometry()
        self.atoms = []
        for item in self.full_atoms:
            item.invalidate()
        for fr in range(self.nfragments()):
            for at in range(self.fragments[fr][0], self.fragments[fr][1] + 1):
                self.full_atoms[at].compute()
                self.full_atoms[at].set_ghosted(self.fragment_types[fr] == 'Ghost')
                # Dummy atoms (symbol 'X') stay out of the real atom list.
                if self.full_atoms[at].symbol() != 'X':
                    self.atoms.append(self.full_atoms[at])
    else:  # release orientation to be free
        self.PYfix_orientation = False
# <<< Methods for Saving >>>
# def save_string_xyz(self, save_ghosts=True):
# """Save a string for a XYZ-style file.
#
# >>> H2OH2O.save_string_xyz()
# 6
# _
# O -1.551007000000 -0.114520000000 0.000000000000
# H -1.934259000000 0.762503000000 0.000000000000
# H -0.599677000000 0.040712000000 0.000000000000
# O 1.350625000000 0.111469000000 0.000000000000
# H 1.680398000000 -0.373741000000 -0.758561000000
# H 1.680398000000 -0.373741000000 0.758561000000
#
# """
# factor = 1.0 if self.PYunits == 'Angstrom' else qcel.constants.bohr2angstroms
#
# N = self.natom()
# if not save_ghosts:
# N = 0
# for i in range(self.natom()):
# if self.Z(i):
# N += 1
# text = "%d\n\n" % (N)
#
# for i in range(self.natom()):
# [x, y, z] = self.atoms[i].compute()
# if save_ghosts or self.Z(i):
# text += '%2s %17.12f %17.12f %17.12f\n' % ((self.symbol(i) if self.Z(i) else "Gh"), \
# x * factor, y * factor, z * factor)
# return text
def save_xyz(self, filename, save_ghosts=True):
    """Save an XYZ file.

    Parameters
    ----------
    filename : str
        Path of the file to (over)write.
    save_ghosts : bool
        Whether ghost atoms are included in the output.

    >>> H2OH2O.save_xyz('h2o.xyz')
    """
    # `with` guarantees the handle is closed even if the write raises;
    # the original leaked the handle on error.
    with open(filename, 'w') as outfile:
        outfile.write(self.save_string_xyz(save_ghosts))
def save_to_checkpoint(self, chkpt, prefix=""):
    """ **NYI** Save information to checkpoint file
    (method name in libmints is save_to_chkpt)
    """
    raise FeatureNotImplemented('Molecule::save_to_checkpoint')  # FINAL
# <<< Methods for Symmetry >>>
def has_symmetry_element(self, op, tol=DEFAULT_SYM_TOL):
    """ **NYI** Whether molecule satisfies the vector symmetry
    operation *op*. Not used by libmints.
    """
    raise FeatureNotImplemented('Molecule::has_symmetry_element')  # FINAL
    # NOTE: everything below the raise is unreachable; it is the ported
    # reference implementation, kept for whenever this is enabled.
    for i in range(self.natom()):
        result = naivemult(self.xyz(i), op)
        atom = self.atom_at_position(result, tol)
        if atom != -1:
            if not self.atoms[atom].is_equivalent_to(self.atoms[i]):
                return False
        else:
            return False
    return True
def point_group(self):
    """Return the active point group object; raise if none has been set."""
    if self.pg is not None:
        return self.pg
    raise ValidationError("Molecule::point_group: Molecular point group has not been set.")
def set_point_group(self, pg):
    """Set the point group to object *pg* and refresh cached symmetry data."""
    self.pg = pg
    # Rebuild symmetry info right away so callers cannot forget to do it.
    self.form_symmetry_information()
def set_full_point_group(self, tol=FULL_PG_TOL):
    """Determine and set FULL point group. self.PYfull_pg_n is highest
    order n in Cn. 0 for atoms or infinity.

    Two fixes relative to the original port:
    1. The perpendicular-C2 pair filter compared ``self.Z(at)`` where
       ``at`` was a stale loop variable left over from earlier loops;
       it must compare the atomic numbers of the pair (i, j).
    2. ``pivot_atom_i`` was unbound (NameError) when no off-axis atom
       was found, so the explicit sanity check was unreachable; it is
       now initialized to ``self.natom()`` before the search.
    """
    verbose = 1  # TODO
    # Get cartesian geometry and put COM at origin
    geom = self.geometry()
    com = self.center_of_mass()
    for at in range(self.natom()):
        geom[at][0] += -com[0]
        geom[at][1] += -com[1]
        geom[at][2] += -com[2]
    # Get rotor type
    rotor = self.rotor_type(tol)
    if verbose > 2:
        print("""  Rotor type                       : %s""" % (rotor))
    # Get the D2h point group from Jet and Ed's code: c1 ci c2 cs d2 c2v c2h d2h
    # and ignore the user-specified subgroup in this case.
    pg = self.find_highest_point_group(tol)
    d2h_subgroup = pg.symbol()
    if verbose > 2:
        print("""  D2h_subgroup                     : %s""" % (self.point_group().symbol()))
    # Check inversion
    v3_zero = [0.0, 0.0, 0.0]
    op_i = self.has_inversion(v3_zero, tol)
    if verbose > 2:
        print("""  Inversion symmetry               : %s""" % ('yes' if op_i else 'no'))
    x_axis = [1, 0, 0]
    y_axis = [0, 1, 0]
    z_axis = [0, 0, 1]
    rot_axis = [0.0, 0.0, 0.0]
    if rotor == 'RT_ATOM':  # atoms
        self.full_pg = 'ATOM'
        self.PYfull_pg_n = 0
    elif rotor == 'RT_LINEAR':  # linear molecules
        self.full_pg = 'D_inf_h' if op_i else 'C_inf_v'
        self.PYfull_pg_n = 0
    elif rotor == 'RT_SPHERICAL_TOP':  # spherical tops
        if not op_i:  # The only spherical top without inversion is Td.
            self.full_pg = 'Td'
            self.PYfull_pg_n = 3
        else:  # Oh or Ih ?
            # Oh has a S4 and should be oriented properly already.
            test_mat = matrix_3d_rotation(geom, z_axis, math.pi / 2.0, True)
            op_symm = equal_but_for_row_order(geom, test_mat, tol)
            if verbose > 2:
                print("""  S4z                              : %s""" % ('yes' if op_symm else 'no'))
            if op_symm:
                self.full_pg = 'Oh'
                self.PYfull_pg_n = 4
            else:
                self.full_pg = 'Ih'
                self.PYfull_pg_n = 5
    elif rotor == 'RT_ASYMMETRIC_TOP':  # asymmetric tops cannot exceed D2h, right?
        # Translate the detected D2h subgroup label into the full-group form.
        if d2h_subgroup == 'c1':
            self.full_pg = 'C1'
            self.PYfull_pg_n = 1
        elif d2h_subgroup == 'ci':
            self.full_pg = 'Ci'
            self.PYfull_pg_n = 1
        elif d2h_subgroup == 'c2':
            self.full_pg = 'Cn'
            self.PYfull_pg_n = 2
        elif d2h_subgroup == 'cs':
            self.full_pg = 'Cs'
            self.PYfull_pg_n = 1
        elif d2h_subgroup == 'd2':
            self.full_pg = 'Dn'
            self.PYfull_pg_n = 2
        elif d2h_subgroup == 'c2v':
            self.full_pg = 'Cnv'
            self.PYfull_pg_n = 2
        elif d2h_subgroup == 'c2h':
            self.full_pg = 'Cnh'
            self.PYfull_pg_n = 2
        elif d2h_subgroup == 'd2h':
            self.full_pg = 'Dnh'
            self.PYfull_pg_n = 2
        else:
            print("""  Warning: Cannot determine point group.""")
    elif rotor in ['RT_SYMMETRIC_TOP', 'RT_PROLATE_SYMMETRIC_TOP', 'RT_OBLATE_SYMMETRIC_TOP']:
        # Find principal axis that is unique and make it z-axis.
        It = self.inertia_tensor()
        I_evals, I_evecs = diagonalize3x3symmat(It)
        ev_list = list(zip(I_evals, transpose(I_evecs)))  # eigenvectors are cols of I_evecs
        ev_list.sort(key=lambda tup: tup[0], reverse=False)
        I_evals, I_evecs = zip(*ev_list)  # sorted eigenvectors are now rows of I_evecs
        if verbose > 2:
            print("""  I_evals: %15.10lf %15.10lf %15.10lf""" % (I_evals[0], I_evals[1], I_evals[2]))
        # The unique moment is the one not degenerate with its neighbor.
        unique_axis = 1
        if abs(I_evals[0] - I_evals[1]) < tol:
            unique_axis = 2
        elif abs(I_evals[1] - I_evals[2]) < tol:
            unique_axis = 0
        # Compute angle between unique axis and the z-axis
        old_axis = I_evecs[unique_axis]
        ddot = dot(z_axis, old_axis)
        if abs(ddot - 1) < 1.0e-10:
            phi = 0.0
        elif abs(ddot + 1) < 1.0e-10:
            phi = math.pi
        else:
            phi = math.acos(ddot)
        # Rotate geometry to put unique axis on the z-axis, if it isn't already.
        if abs(phi) > 1.0e-14:
            rot_axis = cross(z_axis, old_axis)  # right order?
            test_mat = matrix_3d_rotation(geom, rot_axis, phi, False)
            if verbose > 2:
                print("""  Rotating by %lf to get principal axis on z-axis ...""" % (phi))
            geom = [row[:] for row in test_mat]
        if verbose > 2:
            print("""  Geometry to analyze - principal axis on z-axis:""")
            for at in range(self.natom()):
                print("""%20.15lf %20.15lf %20.15lf""" % (geom[at][0], geom[at][1], geom[at][2]))
            print('\n')
        # Determine order Cn and Sn of principal axis.
        Cn_z = matrix_3d_rotation_Cn(geom, z_axis, False, tol)
        if verbose > 2:
            print("""  Highest rotation axis (Cn_z)     : %d""" % (Cn_z))
        Sn_z = matrix_3d_rotation_Cn(geom, z_axis, True, tol)
        if verbose > 2:
            print("""  Rotation axis (Sn_z)             : %d""" % (Sn_z))
        # Check for sigma_h (xy plane).
        op_sigma_h = False
        for at in range(self.natom()):
            if abs(geom[at][2]) < tol:
                continue  # atom is in xy plane
            else:
                test_atom = [geom[at][0], geom[at][1], -1 * geom[at][2]]
                if not atom_present_in_geom(geom, test_atom, tol):
                    break
        else:
            op_sigma_h = True
        if verbose > 2:
            print("""  sigma_h                          : %s""" % ('yes' if op_sigma_h else 'no'))
        # Rotate one off-axis atom to the yz plane and check for sigma_v's.
        # FIX: initialize the pivot so the not-found check below is
        # reachable instead of raising NameError.
        pivot_atom_i = self.natom()
        for at in range(self.natom()):
            dist_from_z = math.sqrt(geom[at][0] * geom[at][0] + geom[at][1] * geom[at][1])
            if abs(dist_from_z) > tol:
                pivot_atom_i = at
                break
        if pivot_atom_i == self.natom():
            raise ValidationError("Molecule::set_full_point_group: Not a linear molecule but could not find off-axis atom.")
        # Rotate around z-axis to put pivot atom in the yz plane
        xy_point = normalize([geom[pivot_atom_i][0], geom[pivot_atom_i][1], 0])
        ddot = dot(y_axis, xy_point)
        phi = None
        if abs(ddot - 1) < 1.0e-10:
            phi = 0.0
        elif abs(ddot + 1) < 1.0e-10:
            phi = math.pi
        else:
            phi = math.acos(ddot)
        is_D = False
        if abs(phi) > 1.0e-14:
            test_mat = matrix_3d_rotation(geom, z_axis, phi, False)
            if verbose > 2:
                print("""  Rotating by %8.3e to get atom %d in yz-plane ...""" % (phi, pivot_atom_i + 1))
            geom = [row[:] for row in test_mat]
        # Check for sigma_v (yz plane).
        op_sigma_v = False
        for at in range(self.natom()):
            if abs(geom[at][0]) < tol:
                continue  # atom is in yz plane
            else:
                test_atom = [-1 * geom[at][0], geom[at][1], geom[at][2]]
                if not atom_present_in_geom(geom, test_atom, tol):
                    break
        else:
            op_sigma_v = True
        if verbose > 2:
            print("""  sigma_v                          : %s""" % ('yes' if op_sigma_v else 'no'))
            print("""  geom to analyze - one atom in yz plane:""")
            for at in range(self.natom()):
                print("""%20.15lf %20.15lf %20.15lf""" % (geom[at][0], geom[at][1], geom[at][2]))
            print('\n')
        # Check for perpendicular C2's.
        # Loop through pairs of atoms to find c2 axis candidates.
        for i in range(self.natom()):
            A = [geom[i][0], geom[i][1], geom[i][2]]
            AdotA = dot(A, A)
            for j in range(i):
                # FIX: compare the pair's atomic numbers; the original
                # used self.Z(at) with a stale loop variable `at`.
                if self.Z(i) != self.Z(j):
                    continue  # ensure same atomic number
                B = [geom[j][0], geom[j][1], geom[j][2]]  # ensure same distance from com
                if abs(AdotA - dot(B, B)) > 1.0e-6:
                    continue  # loose check
                # Use sum of atom vectors as axis if not 0.
                axis = add(A, B)
                if norm(axis) < 1.0e-12:
                    continue
                axis = normalize(axis)
                # Check if axis is perpendicular to z-axis.
                if abs(dot(axis, z_axis)) > 1.0e-6:
                    continue
                # Do the thorough check for C2.
                if matrix_3d_rotation_Cn(geom, axis, False, tol, 2) == 2:
                    is_D = True
        if verbose > 2:
            print("""  perp. C2's                       : %s""" % ('yes' if is_D else 'no'))
        # Now assign point groups!  Sn first.
        if Sn_z == 2 * Cn_z and not is_D:
            self.full_pg = 'Sn'
            self.PYfull_pg_n = Sn_z
            return
        if is_D:  # has perpendicular C2's
            if op_sigma_h and op_sigma_v:  # Dnh : Cn, nC2, sigma_h, nSigma_v
                self.full_pg = 'Dnh'
                self.PYfull_pg_n = Cn_z
            elif Sn_z == 2 * Cn_z:  # Dnd : Cn, nC2, S2n axis coincident with Cn
                self.full_pg = 'Dnd'
                self.PYfull_pg_n = Cn_z
            else:  # Dn : Cn, nC2
                self.full_pg = 'Dn'
                self.PYfull_pg_n = Cn_z
        else:  # lacks perpendicular C2's
            if op_sigma_h and Sn_z == Cn_z:  # Cnh : Cn, sigma_h, Sn coincident with Cn
                self.full_pg = 'Cnh'
                self.PYfull_pg_n = Cn_z
            elif op_sigma_v:  # Cnv : Cn, nCv
                self.full_pg = 'Cnv'
                self.PYfull_pg_n = Cn_z
            else:  # Cn : Cn
                self.full_pg = 'Cn'
                self.PYfull_pg_n = Cn_z
    return
def has_inversion(self, origin, tol=DEFAULT_SYM_TOL):
    """Return True if the molecule is invariant under inversion through *origin*."""
    geom = self.geometry(np_out=True)
    # Image of every atom under inversion: 2*origin - r
    mirrored = 2.0 * np.asarray(origin) - geom
    for at in range(self.natom()):
        partner = self.atom_at_position(mirrored[at], tol)
        if partner < 0:
            return False
        if not self.atoms[partner].is_equivalent_to(self.atoms[at]):
            return False
    return True
def is_plane(self, origin, uperp, tol=DEFAULT_SYM_TOL):
    """Return True if the plane through *origin* with unit normal *uperp*
    is a symmetry plane of the molecule.
    """
    for idx in range(self.natom()):
        disp = sub(self.xyz(idx), origin)
        # Split the displacement into components along and in the plane,
        # then flip the normal component to reflect the atom.
        parallel = scale(uperp, dot(uperp, disp))
        in_plane = sub(disp, parallel)
        reflected = add(sub(in_plane, parallel), origin)
        partner = self.atom_at_position(reflected, tol)
        if partner < 0 or not self.atoms[partner].is_equivalent_to(self.atoms[idx]):
            return False
    return True
def is_axis(self, origin, axis, order, tol=DEFAULT_SYM_TOL):
    """Return True if *axis* through *origin* is a rotation axis of *order*."""
    for idx in range(self.natom()):
        disp = sub(self.xyz(idx), origin)
        # Apply each non-identity rotation of the Cn group in turn.
        for step in range(1, order):
            image = rotate(disp, step * 2.0 * math.pi / order, axis)
            image = add(image, origin)
            partner = self.atom_at_position(image, tol)
            if partner < 0 or not self.atoms[partner].is_equivalent_to(self.atoms[idx]):
                return False
    return True
def is_linear_planar(self, tol=DEFAULT_SYM_TOL):
    """Is the molecule linear, or planar?

    Returns
    -------
    (bool, bool)
        ``(linear, planar)``.

    >>> print(H2OH2O.is_linear_planar())
    (False, False)
    """
    linear = None
    planar = None
    # Two or fewer atoms are trivially both linear and planar.
    if self.natom() < 3:
        linear = True
        planar = True
        return linear, planar
    # find three atoms not on the same line
    A = self.xyz(0)
    B = self.xyz(1)
    BA = sub(B, A)
    BA = normalize(BA)
    CA = [None, None, None]
    min_BAdotCA = 1.0
    # Pick the atom whose direction from A is least parallel to BA.
    for i in range(2, self.natom()):
        tmp = sub(self.xyz(i), A)
        tmp = normalize(tmp)
        if math.fabs(dot(BA, tmp)) < min_BAdotCA:
            CA = copy.deepcopy(tmp)
            min_BAdotCA = math.fabs(dot(BA, tmp))
    # All directions (nearly) parallel to BA => the molecule is linear.
    if min_BAdotCA >= 1.0 - tol:
        linear = True
        planar = True
        return linear, planar
    linear = False
    # Any three non-collinear atoms define a plane.
    if self.natom() < 4:
        planar = True
        return linear, planar
    # check for nontrivial planar molecules
    BAxCA = normalize(cross(BA, CA))
    # Every atom must lie in the plane with normal BAxCA through A.
    for i in range(2, self.natom()):
        tmp = sub(self.xyz(i), A)
        if math.fabs(dot(tmp, BAxCA)) > tol:
            planar = False
            return linear, planar
    planar = True
    return linear, planar
@staticmethod
def like_world_axis(axis, worldxaxis, worldyaxis, worldzaxis):
    """Return ``(label, axis)`` where *label* names the world axis that
    *axis* most overlaps with; *axis* is flipped if it points against
    that world axis.
    """
    ox = dot(axis, worldxaxis)
    oy = dot(axis, worldyaxis)
    oz = dot(axis, worldzaxis)
    absx = math.fabs(ox)
    absy = math.fabs(oy)
    absz = math.fabs(oz)
    if (absx - absy) > 1.0E-12 and (absx - absz) > 1.0E-12:
        like = 'XAxis'
        if ox < 0:
            axis = scale(axis, -1.0)
    elif (absy - absz) > 1.0E-12:
        like = 'YAxis'
        if oy < 0:
            axis = scale(axis, -1.0)
    else:
        like = 'ZAxis'
        if oz < 0:
            axis = scale(axis, -1.0)
    return like, axis
def find_point_group(self, tol=DEFAULT_SYM_TOL):
    """Find computational molecular point group, user can override
    this with the "symmetry" keyword. Result is highest D2h subgroup
    attendant on molecule and allowed by the user.

    Raises
    ------
    ValidationError
        If the user-specified group is not a subgroup of the detected one.
    """
    pg = self.find_highest_point_group(tol)  # D2h subgroup
    user = self.symmetry_from_input()
    if user is not None:
        # Need to handle the cases that the user only provides C2, C2v, C2h, Cs.
        # These point groups need directionality.
        # Did the user provide directionality? If they did, the last letter would be x, y, or z
        # Directionality given, assume the user is smart enough to know what they're doing.
        user_specified_direction = True if user[-1] in ['X', 'x', 'Y', 'y', 'Z', 'z'] else False
        if self.symmetry_from_input() != pg.symbol():
            user = PointGroup(self.symmetry_from_input())
            if user_specified_direction:
                # Assume the user knows what they're doing.
                # Make sure user is subgroup of pg: every bit of the user
                # group must also be present in the detected group.
                if (pg.bits() & user.bits()) != user.bits():
                    raise ValidationError("Molecule::find_point_group: User specified point group (%s) is not a subgroup of the highest detected point group (%s)" % (PointGroup.bits_to_full_name(user.bits()), PointGroup.bits_to_full_name(pg.bits())))
            else:
                # No direction given: try each equivalent orientation of
                # the user's group against the detected group.
                similars, count = similar(user.bits())
                found = False
                for typ in range(count):
                    # If what the user specified and the similar type
                    # matches the full point group we've got a match
                    if (similars[typ] & pg.bits()) == similars[typ]:
                        found = True
                        break
                if found:
                    # Construct a point group object using the found similar
                    user = PointGroup(similars[typ])
                else:
                    raise ValidationError("Molecule::find_point_group: User specified point group (%s) is not a subgroup of the highest detected point group (%s). If this is because the symmetry increased, try to start the calculation again from the last geometry, after checking any symmetry-dependent input, such as DOCC." % (PointGroup.bits_to_full_name(user.bits()), PointGroup.bits_to_full_name(pg.bits())))
            # If we make it here, what the user specified is good.
            pg = user
    return pg
def reset_point_group(self, pgname):
    """Override the symmetry coming from the molecule input string."""
    lowered = pgname.lower()
    self.PYsymmetry_from_input = lowered
    self.set_point_group(self.find_point_group())
def find_highest_point_group(self, tol=DEFAULT_SYM_TOL):
    """Find the highest D2h point group from Jet and Ed's code: c1
    ci c2 cs d2 c2v c2h d2h. Ignore the user-specified subgroup in
    this case.

    Tests each candidate D2h symmetry operation against the geometry
    and accumulates the surviving operations as a bitmask, from which
    the PointGroup is constructed.
    """
    pg_bits = 0
    # The order of the next 2 arrays MUST match!
    symm_bit = [
        SymmOps['C2_z'],
        SymmOps['C2_y'],
        SymmOps['C2_x'],
        SymmOps['i'],
        SymmOps['Sigma_xy'],
        SymmOps['Sigma_xz'],
        SymmOps['Sigma_yz']]
    symm_func = [
        SymmetryOperation.c2_z,
        SymmetryOperation.c2_y,
        SymmetryOperation.c2_x,
        SymmetryOperation.i,
        SymmetryOperation.sigma_xy,
        SymmetryOperation.sigma_xz,
        SymmetryOperation.sigma_yz]
    symop = SymmetryOperation()
    matching_atom = -1
    # Only needs to detect the 8 symmetry operations
    for g in range(7):
        # Call the function pointer: loads the operation's matrix into symop.
        symm_func[g](symop)
        found = True
        for at in range(self.natom()):
            # All D2h operations are diagonal matrices of +/-1, so only
            # the diagonal is needed to map each coordinate.
            op = [symop[0][0], symop[1][1], symop[2][2]]
            pos = naivemult(self.xyz(at), op)
            matching_atom = self.atom_at_position(pos, tol)
            if matching_atom >= 0:
                if not self.atoms[at].is_equivalent_to(self.atoms[matching_atom]):
                    found = False
                    break
            else:
                found = False
                break
        if found:
            pg_bits |= symm_bit[g]
    return PointGroup(pg_bits)
    def symmetry_frame(self, tol=DEFAULT_SYM_TOL):
        """Determine symmetry reference frame. If noreorient is not set,
        this is the rotation matrix applied to the geometry in update_geometry.

        The frame is built by searching, in order, for: a C2 axis, a C2 axis
        perpendicular to the first, a sigma_v plane, and (absent axis and
        inversion) any sigma plane.  The found elements are sign-aligned
        with the closest world axes and assembled into a 3x3 matrix whose
        columns are the frame's x, y, z axes.

        >>> print(H2OH2O.symmetry_frame())
        [[1.0, -0.0, 0.0], [0.0, 1.0, 0.0], [0.0, -0.0, 1.0]]
        """
        com = self.center_of_mass()
        if self.wholegeom is not None:
            current_geom = self.wholegeom
        else:
            current_geom = self.geometry(np_out=True)
        shifted_geom = current_geom - np.asarray(com)
        shifted_geom = shifted_geom.tolist()
        worldxaxis = [1.0, 0.0, 0.0]
        worldyaxis = [0.0, 1.0, 0.0]
        worldzaxis = [0.0, 0.0, 1.0]
        sigma = [0.0, 0.0, 0.0]
        sigmav = [0.0, 0.0, 0.0]
        c2axis = [0.0, 0.0, 0.0]
        c2axisperp = [0.0, 0.0, 0.0]
        linear, planar = self.is_linear_planar(tol)
        have_inversion = self.has_inversion(com, tol)
        # check for C2 axis
        have_c2axis = False
        if self.natom() < 2:
            have_c2axis = True
            c2axis = [0.0, 0.0, 1.0]
        elif linear:
            have_c2axis = True
            c2axis = sub(self.xyz(1), self.xyz(0))
            c2axis = normalize(c2axis)
        elif planar and have_inversion:
            # there is a c2 axis that won't be found using the usual
            # algorithm. find two noncolinear atom-atom vectors (we know
            # that linear == 0)
            BA = sub(self.xyz(1), self.xyz(0))
            BA = normalize(BA)
            for i in range(2, self.natom()):
                CA = sub(self.xyz(i), self.xyz(0))
                CA = normalize(CA)
                BAxCA = cross(BA, CA)
                if norm(BAxCA) > tol:
                    have_c2axis = True
                    BAxCA = normalize(BAxCA)
                    c2axis = copy.deepcopy(BAxCA)
                    break
        else:
            # loop through pairs of atoms to find c2 axis candidates
            for i in range(self.natom()):
                A = shifted_geom[i]
                AdotA = dot(A, A)
                for j in range(i + 1):
                    # the atoms must be identical
                    if not self.atoms[i].is_equivalent_to(self.atoms[j]):
                        continue
                    B = shifted_geom[j]
                    # the atoms must be the same distance from the com
                    if math.fabs(AdotA - dot(B, B)) > tol:
                        continue
                    axis = add(A, B)
                    # atoms colinear with the com don't work
                    if norm(axis) < tol:
                        continue
                    axis = normalize(axis)
                    if self.is_axis(com, axis, 2, tol):
                        have_c2axis = True
                        c2axis = copy.deepcopy(axis)
                        break
                else:
                    continue
                # inner break: candidate accepted, stop the outer scan too
                break
        # symmframe found c2axis
        c2like = 'ZAxis'
        if have_c2axis:
            # try to make the sign of the axis correspond to one of the world axes
            c2like, c2axis = self.like_world_axis(c2axis, worldxaxis, worldyaxis, worldzaxis)
        # check for c2 axis perp to first c2 axis
        have_c2axisperp = False
        if have_c2axis:
            if self.natom() < 2:
                have_c2axisperp = True
                c2axisperp = [1.0, 0.0, 0.0]
            elif linear:
                if have_inversion:
                    have_c2axisperp = True
                    c2axisperp = perp_unit(c2axis, [0.0, 0.0, 1.0])
            else:
                # loop through pairs of atoms to find c2 axis candidates
                for i in range(self.natom()):
                    A = sub(self.xyz(i), com)
                    AdotA = dot(A, A)
                    for j in range(i):
                        # the atoms must be identical
                        if not self.atoms[i].is_equivalent_to(self.atoms[j]):
                            continue
                        B = sub(self.xyz(j), com)
                        # the atoms must be the same distance from the com
                        if math.fabs(AdotA - dot(B, B)) > tol:
                            continue
                        axis = add(A, B)
                        # atoms colinear with the com don't work
                        if norm(axis) < tol:
                            continue
                        axis = normalize(axis)
                        # if axis is not perp continue
                        if math.fabs(dot(axis, c2axis)) > tol:
                            continue
                        if self.is_axis(com, axis, 2, tol):
                            have_c2axisperp = True
                            c2axisperp = copy.deepcopy(axis)
                            break
                    else:
                        continue
                    break
        # symmframe found c2axisperp
        if have_c2axisperp:
            # try to make the sign of the axis correspond to one of the world axes
            c2perplike, c2axisperp = self.like_world_axis(c2axisperp, worldxaxis, worldyaxis, worldzaxis)
            # try to make c2axis the z axis
            if c2perplike == 'ZAxis':
                tmpv = copy.deepcopy(c2axisperp)
                c2axisperp = copy.deepcopy(c2axis)
                c2axis = copy.deepcopy(tmpv)
                c2perplike = c2like
                c2like = 'ZAxis'
            if c2like != 'ZAxis':
                if c2like == 'XAxis':
                    c2axis = cross(c2axis, c2axisperp)
                else:
                    c2axis = cross(c2axisperp, c2axis)
                c2like, c2axis = self.like_world_axis(c2axis, worldxaxis, worldyaxis, worldzaxis)
            # try to make c2axisperplike the x axis
            if c2perplike == 'YAxis':
                c2axisperp = cross(c2axisperp, c2axis)
                c2perplike, c2axisperp = self.like_world_axis(c2axisperp, worldxaxis, worldyaxis, worldzaxis)
        # Check for vertical plane
        have_sigmav = False
        if have_c2axis:
            if self.natom() < 2:
                have_sigmav = True
                sigmav = copy.deepcopy(c2axisperp)
            elif linear:
                have_sigmav = True
                if have_c2axisperp:
                    sigmav = copy.deepcopy(c2axisperp)
                else:
                    sigmav = perp_unit(c2axis, [0.0, 0.0, 1.0])
            else:
                # loop through pairs of atoms to find sigma v plane candidates
                for i in range(self.natom()):
                    A = sub(self.xyz(i), com)
                    AdotA = dot(A, A)
                    # the second atom can equal i because i might be in the plane
                    for j in range(i + 1):
                        # the atoms must be identical
                        if not self.atoms[i].is_equivalent_to(self.atoms[j]):
                            continue
                        B = sub(self.xyz(j), com)
                        # the atoms must be the same distance from the com
                        if math.fabs(AdotA - dot(B, B)) > tol:
                            continue
                        inplane = add(B, A)
                        norm_inplane = norm(inplane)
                        if norm_inplane < tol:
                            continue
                        inplane = scale(inplane, 1.0 / norm_inplane)
                        perp = cross(c2axis, inplane)
                        norm_perp = norm(perp)
                        if norm_perp < tol:
                            continue
                        perp = scale(perp, 1.0 / norm_perp)
                        if self.is_plane(com, perp, tol):
                            have_sigmav = True
                            sigmav = copy.deepcopy(perp)
                            break
                    else:
                        continue
                    break
        # symmframe found sigmav
        if have_sigmav:
            # try to make the sign of the oop vec correspond to one of the world axes
            sigmavlike, sigmav = self.like_world_axis(sigmav, worldxaxis, worldyaxis, worldzaxis)
            # Choose sigmav to be the world x axis, if possible
            if c2like == 'ZAxis' and sigmavlike == 'YAxis':
                sigmav = cross(sigmav, c2axis)
            elif c2like == 'YAxis' and sigmavlike == 'ZAxis':
                sigmav = cross(c2axis, sigmav)
        # under certain conditions i need to know if there is any sigma plane
        have_sigma = False
        if not have_inversion and not have_c2axis:
            if planar:
                # find two noncolinear atom-atom vectors
                # we know that linear==0 since !have_c2axis
                BA = sub(self.xyz(1), self.xyz(0))
                BA = normalize(BA)
                for i in range(2, self.natom()):
                    CA = sub(self.xyz(i), self.xyz(0))
                    CA = normalize(CA)
                    BAxCA = cross(BA, CA)
                    if norm(BAxCA) > tol:
                        have_sigma = True
                        BAxCA = normalize(BAxCA)
                        sigma = copy.deepcopy(BAxCA)
                        break
            else:
                # loop through pairs of atoms to construct trial planes
                for i in range(self.natom()):
                    A = sub(self.xyz(i), com)
                    AdotA = dot(A, A)
                    for j in range(i):
                        # the atoms must be identical
                        if not self.atoms[i].is_equivalent_to(self.atoms[j]):
                            continue
                        B = sub(self.xyz(j), com)
                        BdotB = dot(B, B)
                        # the atoms must be the same distance from the com
                        if math.fabs(AdotA - BdotB) > tol:
                            continue
                        perp = sub(B, A)
                        norm_perp = norm(perp)
                        if norm_perp < tol:
                            continue
                        perp = scale(perp, 1.0 / norm_perp)
                        if self.is_plane(com, perp, tol):
                            have_sigma = True
                            sigma = copy.deepcopy(perp)
                            break
                    else:
                        continue
                    break
        # foundsigma
        if have_sigma:
            # try to make the sign of the oop vec correspond to one of the world axes
            xlikeness = math.fabs(dot(sigma, worldxaxis))
            ylikeness = math.fabs(dot(sigma, worldyaxis))
            zlikeness = math.fabs(dot(sigma, worldzaxis))
            if xlikeness > ylikeness and xlikeness > zlikeness:
                if dot(sigma, worldxaxis) < 0:
                    sigma = scale(sigma, -1.0)
            elif ylikeness > zlikeness:
                if dot(sigma, worldyaxis) < 0:
                    sigma = scale(sigma, -1.0)
            else:
                if dot(sigma, worldzaxis) < 0:
                    sigma = scale(sigma, -1.0)
        # Find the three axes for the symmetry frame
        xaxis = copy.deepcopy(worldxaxis)
        zaxis = copy.deepcopy(worldzaxis)
        if have_c2axis:
            zaxis = copy.deepcopy(c2axis)
            if have_sigmav:
                xaxis = copy.deepcopy(sigmav)
            elif have_c2axisperp:
                xaxis = copy.deepcopy(c2axisperp)
            else:
                # any axis orthogonal to the zaxis will do
                xaxis = perp_unit(zaxis, zaxis)
        elif have_sigma:
            zaxis = copy.deepcopy(sigma)
            xaxis = perp_unit(zaxis, zaxis)
        # Clean up our z axis
        if math.fabs(zaxis[0]) < NOISY_ZERO:
            zaxis[0] = 0.0
        if math.fabs(zaxis[1]) < NOISY_ZERO:
            zaxis[1] = 0.0
        if math.fabs(zaxis[2]) < NOISY_ZERO:
            zaxis[2] = 0.0
        # Clean up our x axis
        if math.fabs(xaxis[0]) < NOISY_ZERO:
            xaxis[0] = 0.0
        if math.fabs(xaxis[1]) < NOISY_ZERO:
            xaxis[1] = 0.0
        if math.fabs(xaxis[2]) < NOISY_ZERO:
            xaxis[2] = 0.0
        # the y is then -x cross z
        yaxis = scale(cross(xaxis, zaxis), -1.0)
        #print("xaxis %20.14lf %20.14lf %20.14lf" % (xaxis[0], xaxis[1], xaxis[2]))
        #print("yaxis %20.14lf %20.14lf %20.14lf" % (yaxis[0], yaxis[1], yaxis[2]))
        #print("zaxis %20.14lf %20.14lf %20.14lf" % (zaxis[0], zaxis[1], zaxis[2]))
        frame = zero(3, 3)
        for i in range(3):
            frame[i][0] = xaxis[i]
            frame[i][1] = yaxis[i]
            frame[i][2] = zaxis[i]
        return frame
    def release_symmetry_information(self):
        """Release symmetry information.

        NOTE(review): these fields normally hold lists (see
        form_symmetry_information) but are reset to integer 0 here, matching
        the zero-atom branch of form_symmetry_information; callers appear to
        test only truthiness (e.g. ``if self.equiv:``) — confirm before
        changing to ``[]``/``None``.
        """
        self.PYnunique = 0
        self.nequiv = 0
        self.PYatom_to_unique = 0
        self.equiv = 0
def form_symmetry_information(self, tol=DEFAULT_SYM_TOL):
"""Initialize molecular specific symmetry information.
Uses the point group object obtain by calling point_group()
"""
if self.equiv:
self.release_symmetry_information()
if self.natom() == 0:
self.PYnunique = 0
self.nequiv = 0
self.PYatom_to_unique = 0
self.equiv = 0
print("""No atoms detected, returning\n""")
return
self.nequiv = []
self.PYatom_to_unique = [0] * self.natom()
self.equiv = []
if self.point_group().symbol() == 'c1':
self.PYnunique = self.natom()
for at in range(self.natom()):
self.nequiv.append(1)
self.PYatom_to_unique[at] = at
self.equiv.append([at])
return
# The first atom is always unique
self.PYnunique = 1
self.nequiv.append(1)
self.PYatom_to_unique[0] = 0
self.equiv.append([0])
ct = self.point_group().char_table()
so = SymmetryOperation()
np3 = [0.0, 0.0, 0.0]
current_geom = self.geometry(np_out=False)
current_Z = [self.Z(at) for at in range(self.natom())]
current_mass = [self.mass(at) for at in range(self.natom())]
# Find the equivalent atoms
for i in range(1, self.natom()):
ac = current_geom[i]
i_is_unique = True
i_equiv = 0
# Apply all symmetry ops in the group to the atom
for g in range(ct.order()):
so = ct.symm_operation(g)
for ii in range(3):
np3[ii] = 0
for jj in range(3):
np3[ii] += so[ii][jj] * ac[jj]
# See if the transformed atom is equivalent to a unique atom
for j in range(self.PYnunique):
unique = self.equiv[j][0]
aj = current_geom[unique]
if current_Z[unique] == current_Z[i] and \
abs(current_mass[unique] - current_mass[i]) < tol and \
distance(np3, aj) < tol:
i_is_unique = False
i_equiv = j
break
if i_is_unique:
self.nequiv.append(1)
self.PYatom_to_unique[i] = self.PYnunique
self.equiv.append([i])
self.PYnunique += 1
else:
self.equiv[i_equiv].append(i)
self.nequiv[i_equiv] += 1
self.PYatom_to_unique[i] = i_equiv
# The first atom in the equiv list is considered the primary
# unique atom. Just to make things look pretty, make the
# atom with the most zeros in its x, y, z coordinate the
# unique atom. Nothing else should rely on this being done.
ztol = 1.0e-5
for i in range(self.PYnunique):
maxzero = 0
jmaxzero = 0
for j in range(self.nequiv[i]):
nzero = 0
tmp = self.equiv[i][j]
arr = np.asarray(current_geom[tmp])
nzero = len(np.where(np.abs(arr) < ztol))
if nzero > maxzero:
maxzero = nzero
jmaxzero = j
tmp = self.equiv[i][jmaxzero]
self.equiv[i][jmaxzero] = self.equiv[i][0]
self.equiv[i][0] = tmp
#print('nunique', self.PYnunique)
#print('nequiv', self.nequiv)
#print('atom_to_unique', self.PYatom_to_unique)
#print('equiv', self.equiv)
    def sym_label(self):
        """Returns the symmetry label (Schoenflies symbol), first detecting
        and caching the point group if it has not been set yet."""
        if self.pg is None:
            self.set_point_group(self.find_point_group())
        return self.pg.symbol()
def irrep_labels(self):
"""Returns the irrep labels"""
if self.pg is None:
self.set_point_group(self.find_point_group())
return [self.pg.char_table().gamma(i).symbol_ns() for i in range(self.pg.char_table().nirrep())]
    def symmetry_from_input(self):
        """Returns the symmetry string specified in the input (or set later
        via reset_point_group, which stores it lower-cased).

        >>> print(H2OH2O.symmetry_from_input())
        C1
        """
        return self.PYsymmetry_from_input
    def symmetrize(self, tol=None):
        """Force the molecule to have the symmetry specified in pg.
        This is to handle noise coming in from optking. Exception is thrown if
        atoms cannot be mapped within tol(erance).

        Each atom's new position is the average over all group operations of
        the operation applied to its image atom's position, which projects
        out symmetry-breaking numerical noise.
        """
        #raise FeatureNotImplemented('Molecule::symmetrize')  # FINAL SYMM
        temp = zero(self.natom(), 3)
        ct = self.point_group().char_table()
        # Obtain atom mapping of atom * symm op to atom
        # Allow compute_atom_map() to use its own default, if not specified here.
        if tol is not None:
            atom_map = compute_atom_map(self, tol)
        else:
            atom_map = compute_atom_map(self)
        # Symmetrize the molecule to remove any noise
        for at in range(self.natom()):
            for g in range(ct.order()):
                Gatom = atom_map[at][g]
                so = ct.symm_operation(g)
                # Full so must be used if molecule is not in standard orientation
                temp[at][0] += so[0][0] * self.x(Gatom) / ct.order()
                temp[at][0] += so[0][1] * self.y(Gatom) / ct.order()
                temp[at][0] += so[0][2] * self.z(Gatom) / ct.order()
                temp[at][1] += so[1][0] * self.x(Gatom) / ct.order()
                temp[at][1] += so[1][1] * self.y(Gatom) / ct.order()
                temp[at][1] += so[1][2] * self.z(Gatom) / ct.order()
                temp[at][2] += so[2][0] * self.x(Gatom) / ct.order()
                temp[at][2] += so[2][1] * self.y(Gatom) / ct.order()
                temp[at][2] += so[2][2] * self.z(Gatom) / ct.order()
        # Set the geometry to ensure z-matrix variables get updated
        self.set_geometry(temp)
    def schoenflies_symbol(self):
        """Returns the Schoenflies symbol of the active point group
        (e.g. ``c1``)."""
        return self.point_group().symbol()
def valid_atom_map(self, tol=0.01):
"""Check if current geometry fits current point group
"""
np3 = [0.0, 0.0, 0.0]
ct = self.point_group().char_table()
# loop over all centers
for at in range(self.natom()):
ac = self.xyz(at)
# For each operation in the pointgroup, transform the coordinates of
# center "at" and see which atom it maps into
for g in range(ct.order()):
so = ct.symm_operation(g)
for ii in range(3):
np3[ii] = 0
for jj in range(3):
np3[ii] += so[ii][jj] * ac[jj]
if self.atom_at_position(np3, tol) < 0:
return False
return True
# provide a more transparent name for this utility
is_symmetric = valid_atom_map
    # Test a set of xyz coordinates to see if they satisfy the symmetry
    # operations of the current molecule.
    def is_XYZ_symmetric(self, XYZ, tol=0.01):
        """Return True if geometry *XYZ* satisfies this molecule's point
        group.  Works on a clone so the current geometry is untouched."""
        testmol = self.clone()
        testmol.set_geometry(XYZ)
        return testmol.is_symmetric(tol)
#def valid_atom_map(self, tol=0.01):
# """Check if current geometry fits current point group
# """
# np3 = np.zeros(3)
# ct = self.point_group().char_table()
# current_geom = self.geometry(np_out=True)
# # loop over all centers
# for at in range(self.natom()):
# # For each operation in the pointgroup, transform the coordinates of
# # center "at" and see which atom it maps into
# for g in range(ct.order()):
# so = ct.symm_operation(g)
# np3 = so.dot(current_geom[at])
# if self.atom_at_position(np3, tol) < 0:
# return False
# return True
    def full_point_group_with_n(self):
        """Return point group name with its literal 'n' placeholder, such as
        Cnv or Sn (see get_full_point_group() for the substituted form)."""
        return self.full_pg
    def full_pg_n(self):
        """Return n in Cnv, etc.; if there is no n (e.g. Td)
        it's the highest-order rotation axis.
        """
        return self.PYfull_pg_n
def get_full_point_group(self):
"""Return point group name such as C3v or S8.
(method name in libmints is full_point_group)
"""
pg_with_n = self.full_pg
if pg_with_n in ['D_inf_h', 'C_inf_v', 'C1', 'Cs', 'Ci', 'Td', 'Oh', 'Ih']:
return pg_with_n # These don't need changes - have no 'n'.
else:
return pg_with_n.replace('n', str(self.PYfull_pg_n), 1)
    # <<< Methods for Uniqueness >>> (assume molecular point group has been determined)

    def nunique(self):
        """Return the number of unique atoms (equivalence classes) as
        established by form_symmetry_information()."""
        return self.PYnunique
    def unique(self, iuniq):
        """Returns the overall atom number of the iuniq'th unique atom
        (the first member of its equivalence class)."""
        return self.equiv[iuniq][0]
    def nequivalent(self, iuniq):
        """Returns the number of atoms equivalent to unique atom iuniq
        (including the unique atom itself)."""
        return self.nequiv[iuniq]
    def equivalent(self, iuniq, j):
        """Returns the j'th atom equivalent to unique atom iuniq
        (j == 0 is the unique atom itself)."""
        return self.equiv[iuniq][j]
    def atom_to_unique(self, iatom):
        """Converts an atom number to the number of its generating unique
        atom. The return value is in [0, nunique).
        """
        return self.PYatom_to_unique[iatom]
def atom_to_unique_offset(self, iatom):
"""Converts an atom number to the offset of this atom
in the list of generated atoms. The unique atom itself is allowed offset 0.
"""
iuniq = self.PYatom_to_unique[iatom]
nequiv = self.nequiv[iuniq]
for i in range(nequiv):
if self.equiv[iuniq][i] == iatom:
return i
raise ValidationError("Molecule::atom_to_unique_offset: I should've found the atom requested...but didn't.")
return -1
def max_nequivalent(self):
"""Returns the maximum number of equivalent atoms."""
mmax = 0
for i in range(self.nunique()):
if mmax < self.nequivalent(i):
mmax = self.nequivalent(i)
return mmax
def atom_present_in_geom(geom, b, tol=DEFAULT_SYM_TOL):
    """Return True if any row of *geom* lies within *tol* of point *b*.

    Helper used by set_full_point_group() when scanning a geometry.
    """
    for row in geom:
        if distance(b, [row[0], row[1], row[2]]) < tol:
            return True
    return False
def matrix_3d_rotation_Cn(coord, axis, reflect, tol=DEFAULT_SYM_TOL, max_Cn_to_check=-1):
    """Find maximum n in Cn around given axis, i.e., the highest-order rotation axis.

    @param coord Matrix : points to rotate - column dim is 3
    @param axis Vector3 : axis around which to rotate, does not need to be normalized
    @param bool reflect : if true, really look for Sn not Cn
    @returns n
    """
    # Check all atoms. In future, make more intelligent.
    if max_Cn_to_check == -1:
        max_possible = len(coord)
    else:
        max_possible = max_Cn_to_check
    highest = 1  # C1 is there for sure
    for n in range(2, max_possible + 1):
        image = matrix_3d_rotation(coord, axis, 2 * math.pi / n, reflect)
        # keep the largest n whose rotation maps the set onto itself
        if equal_but_for_row_order(coord, image, tol):
            highest = n
    return highest
def matrix_3d_rotation(mat, axis, phi, Sn):
    """For a matrix of 3D vectors (ncol==3), rotate a set of points around an
    arbitrary axis. Vectors are the rows of the matrix.

    @param axis Vector3 : axis around which to rotate (need not be normalized)
    @param phi double : magnitude of rotation in rad
    @param Sn bool : if true, then also reflect in plane through origin and
        perpendicular to rotation (improper rotation)
    @returns matrix (list of rows) with rotated points

    Raises ValidationError if the rows or the axis are not 3-dimensional.
    """
    if len(mat[0]) != 3 or len(axis) != 3:
        raise ValidationError("matrix_3d_rotation: Can only rotate matrix with 3d vectors")
    # Normalize rotation vector
    [wx, wy, wz] = normalize(axis)
    cp = 1.0 - math.cos(phi)
    # Rodrigues rotation matrix:
    #   R = cos(phi) I + sin(phi) [w]_x + (1 - cos(phi)) w w^T
    R = zero(3, 3)
    R[0][0] = wx * wx * cp + math.cos(phi)
    R[0][1] = wx * wy * cp + math.sin(phi) * wz * -1
    R[0][2] = wx * wz * cp + math.sin(phi) * wy
    R[1][0] = wx * wy * cp + math.sin(phi) * wz
    R[1][1] = wy * wy * cp + math.cos(phi)
    R[1][2] = wy * wz * cp + math.sin(phi) * wx * -1
    R[2][0] = wx * wz * cp + math.sin(phi) * wy * -1
    R[2][1] = wy * wz * cp + math.sin(phi) * wx
    R[2][2] = wz * wz * cp + math.cos(phi)
    # Rows are points, so apply x -> R x as coord * R^T:
    # R * coord^t = R_coord^t or coord * R^t = R_coord
    rotated_coord = mult(mat, transpose(R))
    if Sn:
        # Householder reflection through the plane perpendicular to the
        # (unit) rotation axis: M_ij = delta_ij - 2 w_i w_j.
        # BUG FIX: the off-diagonal elements were previously assigned
        # +2*w_i*w_j, contradicting the documented delta_ij - 2 a_i a_j
        # form; that matrix is not the intended reflection for axes that
        # are not aligned with a coordinate plane.
        R = identity(3)
        R[0][0] -= 2 * wx * wx
        R[1][1] -= 2 * wy * wy
        R[2][2] -= 2 * wz * wz
        R[1][0] = -2 * wx * wy
        R[2][0] = -2 * wx * wz
        R[2][1] = -2 * wy * wz
        R[0][1] = -2 * wx * wy
        R[0][2] = -2 * wx * wz
        R[1][2] = -2 * wy * wz
        rotated_coord = mult(rotated_coord, transpose(R))
    return rotated_coord
def equal_but_for_row_order(mat, rhs, tol=DEFAULT_SYM_TOL):
    """Checks matrix equality, but allows rows to be in a different order.

    @param rhs Matrix to compare to.
    @returns True if every row of *mat* matches (element-wise within *tol*)
        some row of *rhs*, otherwise False.  Multiplicity is not checked:
        two identical rows of *mat* may match the same row of *rhs*.
    """
    nrow = len(mat)
    for m in range(nrow):
        row_found = False
        for m_rhs in range(nrow):
            if not any(abs(mat[m][n] - rhs[m_rhs][n]) > tol
                       for n in range(len(mat[m]))):
                row_found = True
                break
        if not row_found:
            return False
    return True
def compute_atom_map(mol, tol=0.05):
    """Computes atom mappings during symmetry operations. Useful in
    generating SO information and Cartesian displacement SALCs.

    param mol Molecule to form mapping matrix from.
    returns Integer matrix of dimension natoms X nirreps; entry [i][g] is
        the atom that atom *i* maps onto under group operation *g*.
    raises ValidationError if an operation maps an atom to a position where
        no atom sits (geometry does not actually have the symmetry).
    """
    # create the character table for the point group
    ct = mol.point_group().char_table()
    natom = mol.natom()
    ng = ct.order()
    # (previous version pre-created an unused SymmetryOperation() and built
    # atom_map with an index loop; a nested comprehension is equivalent)
    atom_map = [[0] * ng for _ in range(natom)]
    # loop over all centers
    for i in range(natom):
        ac = mol.xyz(i)
        # then for each symop in the pointgroup, transform the coordinates of
        # center "i" and see which atom it maps into
        for g in range(ng):
            so = ct.symm_operation(g)
            np3 = [sum(so[ii][jj] * ac[jj] for jj in range(3)) for ii in range(3)]
            atom_map[i][g] = mol.atom_at_position(np3, tol)
            if atom_map[i][g] < 0:
                print(""" Molecule:\n""")
                mol.print_out()
                print(""" attempted to find atom at\n""")
                print(""" %lf %lf %lf\n""" % (np3[0], np3[1], np3[2]))
                raise ValidationError("ERROR: Symmetry operation %d did not map atom %d to another atom:\n" % (g, i + 1))
    return atom_map
# TODO outfile
# ignored =, +, 0, += assignment operators
# no pubchem
# TODO rename save_string_for_psi4
# TODO add no_com no_reorient in save string for psi4
|
apache/incubator-airflow | refs/heads/master | tests/providers/google/cloud/hooks/test_automl.py | 3 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from unittest import mock
from google.cloud.automl_v1beta1 import AutoMlClient
from airflow.providers.google.cloud.hooks.automl import CloudAutoMLHook
from tests.providers.google.cloud.utils.base_gcp_mock import mock_base_gcp_hook_no_default_project_id
# Shared fixtures for the CloudAutoMLHook tests below.
CREDENTIALS = "test-creds"
CLIENT_INFO = "client-info"
TASK_ID = "test-automl-hook"
GCP_PROJECT_ID = "test-project"
GCP_LOCATION = "test-location"
MODEL_NAME = "test_model"
MODEL_ID = "projects/198907790164/locations/us-central1/models/TBL9195602771183665152"
DATASET_ID = "TBL123456789"
MODEL = {
    "display_name": MODEL_NAME,
    "dataset_id": DATASET_ID,
    "tables_model_metadata": {"train_budget_milli_node_hours": 1000},
}
# Fully-qualified resource paths expected in the mocked client calls.
LOCATION_PATH = f"projects/{GCP_PROJECT_ID}/locations/{GCP_LOCATION}"
MODEL_PATH = f"projects/{GCP_PROJECT_ID}/locations/{GCP_LOCATION}/models/{MODEL_ID}"
DATASET_PATH = f"projects/{GCP_PROJECT_ID}/locations/{GCP_LOCATION}/datasets/{DATASET_ID}"
INPUT_CONFIG = {"input": "value"}
OUTPUT_CONFIG = {"output": "value"}
PAYLOAD = {"test": "payload"}
DATASET = {"dataset_id": "data"}
MASK = {"field": "mask"}
class TestAuoMLHook(unittest.TestCase):
    """Unit tests for CloudAutoMLHook: each test patches the underlying
    google-cloud-automl client method and asserts that the hook forwards
    exactly the expected request payload.

    NOTE(review): the class name has a typo (Auo -> Auto); it is kept
    because renaming would change the test node IDs used in CI selections.
    """

    def setUp(self) -> None:
        # Construct the hook with GoogleBaseHook.__init__ replaced so no
        # real project-id/credentials resolution happens.
        with mock.patch(
            "airflow.providers.google.cloud.hooks.automl.GoogleBaseHook.__init__",
            new=mock_base_gcp_hook_no_default_project_id,
        ):
            self.hook = CloudAutoMLHook()
        self.hook._get_credentials = mock.MagicMock(return_value=CREDENTIALS)  # type: ignore

    @mock.patch(
        "airflow.providers.google.cloud.hooks.automl.GoogleBaseHook.client_info",
        new_callable=lambda: CLIENT_INFO,
    )
    @mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient")
    def test_get_conn(self, mock_automl_client, mock_client_info):
        self.hook.get_conn()
        mock_automl_client.assert_called_once_with(credentials=CREDENTIALS, client_info=CLIENT_INFO)

    @mock.patch(
        "airflow.providers.google.cloud.hooks.automl.GoogleBaseHook.client_info",
        new_callable=lambda: CLIENT_INFO,
    )
    @mock.patch("airflow.providers.google.cloud.hooks.automl.PredictionServiceClient")
    def test_prediction_client(self, mock_prediction_client, mock_client_info):
        client = self.hook.prediction_client  # pylint: disable=unused-variable # noqa
        mock_prediction_client.assert_called_once_with(credentials=CREDENTIALS, client_info=CLIENT_INFO)

    @mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient.create_model")
    def test_create_model(self, mock_create_model):
        self.hook.create_model(model=MODEL, location=GCP_LOCATION, project_id=GCP_PROJECT_ID)
        mock_create_model.assert_called_once_with(
            request=dict(parent=LOCATION_PATH, model=MODEL), retry=None, timeout=None, metadata=()
        )

    @mock.patch("airflow.providers.google.cloud.hooks.automl.PredictionServiceClient.batch_predict")
    def test_batch_predict(self, mock_batch_predict):
        self.hook.batch_predict(
            model_id=MODEL_ID,
            location=GCP_LOCATION,
            project_id=GCP_PROJECT_ID,
            input_config=INPUT_CONFIG,
            output_config=OUTPUT_CONFIG,
        )
        mock_batch_predict.assert_called_once_with(
            request=dict(
                name=MODEL_PATH, input_config=INPUT_CONFIG, output_config=OUTPUT_CONFIG, params=None
            ),
            retry=None,
            timeout=None,
            metadata=(),
        )

    @mock.patch("airflow.providers.google.cloud.hooks.automl.PredictionServiceClient.predict")
    def test_predict(self, mock_predict):
        self.hook.predict(
            model_id=MODEL_ID,
            location=GCP_LOCATION,
            project_id=GCP_PROJECT_ID,
            payload=PAYLOAD,
        )
        mock_predict.assert_called_once_with(
            request=dict(name=MODEL_PATH, payload=PAYLOAD, params=None),
            retry=None,
            timeout=None,
            metadata=(),
        )

    @mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient.create_dataset")
    def test_create_dataset(self, mock_create_dataset):
        self.hook.create_dataset(dataset=DATASET, location=GCP_LOCATION, project_id=GCP_PROJECT_ID)
        mock_create_dataset.assert_called_once_with(
            request=dict(parent=LOCATION_PATH, dataset=DATASET),
            retry=None,
            timeout=None,
            metadata=(),
        )

    @mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient.import_data")
    def test_import_dataset(self, mock_import_data):
        self.hook.import_data(
            dataset_id=DATASET_ID,
            location=GCP_LOCATION,
            project_id=GCP_PROJECT_ID,
            input_config=INPUT_CONFIG,
        )
        mock_import_data.assert_called_once_with(
            request=dict(name=DATASET_PATH, input_config=INPUT_CONFIG),
            retry=None,
            timeout=None,
            metadata=(),
        )

    @mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient.list_column_specs")
    def test_list_column_specs(self, mock_list_column_specs):
        table_spec = "table_spec_id"
        filter_ = "filter"
        page_size = 42
        self.hook.list_column_specs(
            dataset_id=DATASET_ID,
            table_spec_id=table_spec,
            location=GCP_LOCATION,
            project_id=GCP_PROJECT_ID,
            field_mask=MASK,
            filter_=filter_,
            page_size=page_size,
        )
        # parent path is built by the real client helper, not hand-rolled
        parent = AutoMlClient.table_spec_path(GCP_PROJECT_ID, GCP_LOCATION, DATASET_ID, table_spec)
        mock_list_column_specs.assert_called_once_with(
            request=dict(parent=parent, field_mask=MASK, filter=filter_, page_size=page_size),
            retry=None,
            timeout=None,
            metadata=(),
        )

    @mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient.get_model")
    def test_get_model(self, mock_get_model):
        self.hook.get_model(model_id=MODEL_ID, location=GCP_LOCATION, project_id=GCP_PROJECT_ID)
        mock_get_model.assert_called_once_with(
            request=dict(name=MODEL_PATH), retry=None, timeout=None, metadata=()
        )

    @mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient.delete_model")
    def test_delete_model(self, mock_delete_model):
        self.hook.delete_model(model_id=MODEL_ID, location=GCP_LOCATION, project_id=GCP_PROJECT_ID)
        mock_delete_model.assert_called_once_with(
            request=dict(name=MODEL_PATH), retry=None, timeout=None, metadata=()
        )

    @mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient.update_dataset")
    def test_update_dataset(self, mock_update_dataset):
        self.hook.update_dataset(
            dataset=DATASET,
            update_mask=MASK,
        )
        mock_update_dataset.assert_called_once_with(
            request=dict(dataset=DATASET, update_mask=MASK), retry=None, timeout=None, metadata=()
        )

    @mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient.deploy_model")
    def test_deploy_model(self, mock_deploy_model):
        image_detection_metadata = {}
        self.hook.deploy_model(
            model_id=MODEL_ID,
            image_detection_metadata=image_detection_metadata,
            location=GCP_LOCATION,
            project_id=GCP_PROJECT_ID,
        )
        mock_deploy_model.assert_called_once_with(
            request=dict(
                name=MODEL_PATH,
                image_object_detection_model_deployment_metadata=image_detection_metadata,
            ),
            retry=None,
            timeout=None,
            metadata=(),
        )

    @mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient.list_table_specs")
    def test_list_table_specs(self, mock_list_table_specs):
        filter_ = "filter"
        page_size = 42
        self.hook.list_table_specs(
            dataset_id=DATASET_ID,
            location=GCP_LOCATION,
            project_id=GCP_PROJECT_ID,
            filter_=filter_,
            page_size=page_size,
        )
        mock_list_table_specs.assert_called_once_with(
            request=dict(parent=DATASET_PATH, filter=filter_, page_size=page_size),
            retry=None,
            timeout=None,
            metadata=(),
        )

    @mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient.list_datasets")
    def test_list_datasets(self, mock_list_datasets):
        self.hook.list_datasets(location=GCP_LOCATION, project_id=GCP_PROJECT_ID)
        mock_list_datasets.assert_called_once_with(
            request=dict(parent=LOCATION_PATH), retry=None, timeout=None, metadata=()
        )

    @mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient.delete_dataset")
    def test_delete_dataset(self, mock_delete_dataset):
        self.hook.delete_dataset(dataset_id=DATASET_ID, location=GCP_LOCATION, project_id=GCP_PROJECT_ID)
        mock_delete_dataset.assert_called_once_with(
            request=dict(name=DATASET_PATH), retry=None, timeout=None, metadata=()
        )
|
jallohm/django | refs/heads/master | django/views/decorators/vary.py | 586 | from functools import wraps
from django.utils.cache import patch_vary_headers
from django.utils.decorators import available_attrs
def vary_on_headers(*headers):
    """
    Decorator factory: make a view's response declare, via the ``Vary``
    header, that its content depends on the named request headers.

        @vary_on_headers('Cookie', 'Accept-language')
        def index(request):
            ...

    Header names are treated case-insensitively.
    """
    def decorator(func):
        @wraps(func, assigned=available_attrs(func))
        def wrapper(*args, **kwargs):
            response = func(*args, **kwargs)
            patch_vary_headers(response, headers)
            return response
        return wrapper
    return decorator
def vary_on_cookie(func):
    """
    Decorator: mark a view's response as varying on the ``Cookie`` request
    header (cached copies must be keyed by cookie).

        @vary_on_cookie
        def index(request):
            ...
    """
    @wraps(func, assigned=available_attrs(func))
    def wrapper(*args, **kwargs):
        response = func(*args, **kwargs)
        patch_vary_headers(response, ('Cookie',))
        return response
    return wrapper
|
schinke/solid-fortnight-ba | refs/heads/master | flask/venv/lib/python2.7/site-packages/psycopg2/_range.py | 47 | """Implementation of the Range type and adaptation
"""
# psycopg/_range.py - Implementation of the Range type and adaptation
#
# Copyright (C) 2012 Daniele Varrazzo <daniele.varrazzo@gmail.com>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import re
from psycopg2._psycopg import ProgrammingError, InterfaceError
from psycopg2.extensions import ISQLQuote, adapt, register_adapter, b
from psycopg2.extensions import new_type, new_array_type, register_type
class Range(object):
    """Python representation for a PostgreSQL |range|_ type.

    :param lower: lower bound for the range. `!None` means unbound
    :param upper: upper bound for the range. `!None` means unbound
    :param bounds: one of the literal strings ``()``, ``[)``, ``(]``, ``[]``,
        representing whether the lower or upper bounds are included
    :param empty: if `!True`, the range is empty

    :raises ValueError: if *bounds* is not one of the four valid flag pairs.
    """
    # Three slots only: the empty range is encoded by all three being None
    # (see isempty), so no extra flag attribute is needed.
    __slots__ = ('_lower', '_upper', '_bounds')

    def __init__(self, lower=None, upper=None, bounds='[)', empty=False):
        if not empty:
            if bounds not in ('[)', '(]', '()', '[]'):
                raise ValueError("bound flags not valid: %r" % bounds)

            self._lower = lower
            self._upper = upper
            self._bounds = bounds
        else:
            # marker for the empty range: everything is None
            self._lower = self._upper = self._bounds = None

    def __repr__(self):
        if self._bounds is None:
            return "%s(empty=True)" % self.__class__.__name__
        else:
            return "%s(%r, %r, %r)" % (self.__class__.__name__,
                self._lower, self._upper, self._bounds)

    @property
    def lower(self):
        """The lower bound of the range. `!None` if empty or unbound."""
        return self._lower

    @property
    def upper(self):
        """The upper bound of the range. `!None` if empty or unbound."""
        return self._upper

    @property
    def isempty(self):
        """`!True` if the range is empty."""
        return self._bounds is None

    @property
    def lower_inf(self):
        """`!True` if the range doesn't have a lower bound."""
        if self._bounds is None: return False
        return self._lower is None

    @property
    def upper_inf(self):
        """`!True` if the range doesn't have an upper bound."""
        if self._bounds is None: return False
        return self._upper is None

    @property
    def lower_inc(self):
        """`!True` if the lower bound is included in the range."""
        if self._bounds is None: return False
        if self._lower is None: return False
        return self._bounds[0] == '['

    @property
    def upper_inc(self):
        """`!True` if the upper bound is included in the range."""
        if self._bounds is None: return False
        if self._upper is None: return False
        return self._bounds[1] == ']'

    def __contains__(self, x):
        # The empty range contains nothing; an unbound side always matches.
        if self._bounds is None: return False
        if self._lower is not None:
            if self._bounds[0] == '[':
                if x < self._lower: return False
            else:
                if x <= self._lower: return False
        if self._upper is not None:
            if self._bounds[1] == ']':
                if x > self._upper: return False
            else:
                if x >= self._upper: return False
        return True

    def __nonzero__(self):
        # A range is falsy only when empty.
        return self._bounds is not None

    # Python 3 spells __nonzero__ as __bool__; alias it so truth testing
    # keeps working when the module runs under Python 3 (same fix as
    # applied in later upstream psycopg2 releases).
    __bool__ = __nonzero__

    def __eq__(self, other):
        if not isinstance(other, Range):
            return False
        return (self._lower == other._lower
            and self._upper == other._upper
            and self._bounds == other._bounds)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash((self._lower, self._upper, self._bounds))

    # as the postgres docs describe for the server-side stuff,
    # ordering is rather arbitrary, but will remain stable
    # and consistent.

    def __lt__(self, other):
        # Lexicographic on (_lower, _upper, _bounds); None sorts first.
        if not isinstance(other, Range):
            return NotImplemented
        for attr in ('_lower', '_upper', '_bounds'):
            self_value = getattr(self, attr)
            other_value = getattr(other, attr)
            if self_value == other_value:
                pass
            elif self_value is None:
                return True
            elif other_value is None:
                return False
            else:
                return self_value < other_value
        return False

    def __le__(self, other):
        if self == other:
            return True
        else:
            return self.__lt__(other)

    def __gt__(self, other):
        # Delegate to the reflected __lt__ so subclasses stay consistent.
        if isinstance(other, Range):
            return other.__lt__(self)
        else:
            return NotImplemented

    def __ge__(self, other):
        if self == other:
            return True
        else:
            return self.__gt__(other)
def register_range(pgrange, pyrange, conn_or_curs, globally=False):
    """Create and register an adapter and the typecasters to convert between
    a PostgreSQL |range|_ type and a PostgreSQL `Range` subclass.

    :param pgrange: the name of the PostgreSQL |range| type. Can be
        schema-qualified
    :param pyrange: a `Range` strict subclass, or just a name to give to a new
        class
    :param conn_or_curs: a connection or cursor used to find the oid of the
        range and its subtype; the typecaster is registered in a scope limited
        to this object, unless *globally* is set to `!True`
    :param globally: if `!False` (default) register the typecaster only on
        *conn_or_curs*, otherwise register it globally
    :return: `RangeCaster` instance responsible for the conversion

    If a string is passed to *pyrange*, a new `Range` subclass is created
    with such name and will be available as the `~RangeCaster.range` attribute
    of the returned `RangeCaster` object.

    The function queries the database on *conn_or_curs* to inspect the
    *pgrange* type and raises `~psycopg2.ProgrammingError` if the type is not
    found.  If querying the database is not advisable, use directly the
    `RangeCaster` class and register the adapter and typecasters using the
    provided functions.
    """
    caster = RangeCaster._from_db(pgrange, pyrange, conn_or_curs)
    # A None scope means "register globally".
    scope = None
    if not globally:
        scope = conn_or_curs or None
    caster._register(scope)
    return caster
class RangeAdapter(object):
    """`ISQLQuote` adapter for `Range` subclasses.

    This is an abstract class: concrete classes must set a `name` class
    attribute or override `getquoted()`.
    """
    # Name of the PostgreSQL range constructor function, e.g. 'daterange'.
    name = None

    def __init__(self, adapted):
        self.adapted = adapted

    def __conform__(self, proto):
        # Bug fix: the original tested `self._proto`, an attribute that is
        # never assigned anywhere, raising AttributeError whenever psycopg2
        # probed the adaptation protocol.  The protocol argument itself must
        # be tested (as done by the other adapters, e.g. extras.Inet).
        if proto is ISQLQuote:
            return self

    def prepare(self, conn):
        # Remember the connection so bound adapters can be prepared with the
        # right encoding in getquoted().
        self._conn = conn

    def getquoted(self):
        """Return the quoted SQL constructor call for the adapted range."""
        if self.name is None:
            raise NotImplementedError(
                'RangeAdapter must be subclassed overriding its name '
                'or the getquoted() method')
        r = self.adapted
        if r.isempty:
            return b("'empty'::" + self.name)

        # Adapt each bound with the regular adaptation machinery; an unbound
        # side becomes SQL NULL.
        if r.lower is not None:
            a = adapt(r.lower)
            if hasattr(a, 'prepare'):
                a.prepare(self._conn)
            lower = a.getquoted()
        else:
            lower = b('NULL')

        if r.upper is not None:
            a = adapt(r.upper)
            if hasattr(a, 'prepare'):
                a.prepare(self._conn)
            upper = a.getquoted()
        else:
            upper = b('NULL')

        return b(self.name + '(') + lower + b(', ') + upper \
            + b(", '%s')" % r._bounds)
class RangeCaster(object):
    """Helper class to convert between `Range` and PostgreSQL range types.

    Objects of this class are usually created by `register_range()`. Manual
    creation could be useful if querying the database is not advisable: in
    this case the oids must be provided.
    """
    def __init__(self, pgrange, pyrange, oid, subtype_oid, array_oid=None):
        # oid of the range's element type; parse() uses it to cast bounds.
        self.subtype_oid = subtype_oid
        self._create_ranges(pgrange, pyrange)

        name = self.adapter.name or self.adapter.__class__.__name__

        self.typecaster = new_type((oid,), name, self.parse)

        if array_oid is not None:
            self.array_typecaster = new_array_type(
                (array_oid,), name + "ARRAY", self.typecaster)
        else:
            self.array_typecaster = None

    def _create_ranges(self, pgrange, pyrange):
        """Create Range and RangeAdapter classes if needed."""
        # if got a string create a new RangeAdapter concrete type (with a name)
        # else take it as an adapter. Passing an adapter should be considered
        # an implementation detail and is not documented. It is currently used
        # for the numeric ranges.
        self.adapter = None
        if isinstance(pgrange, basestring):
            self.adapter = type(pgrange, (RangeAdapter,), {})
            self.adapter.name = pgrange
        else:
            try:
                if issubclass(pgrange, RangeAdapter) and pgrange is not RangeAdapter:
                    self.adapter = pgrange
            except TypeError:
                pass

            if self.adapter is None:
                raise TypeError(
                    'pgrange must be a string or a RangeAdapter strict subclass')

        self.range = None
        try:
            if isinstance(pyrange, basestring):
                self.range = type(pyrange, (Range,), {})
            # When pyrange is a string the issubclass() below raises
            # TypeError, which is caught: that is the expected path.
            if issubclass(pyrange, Range) and pyrange is not Range:
                self.range = pyrange
        except TypeError:
            pass

        if self.range is None:
            raise TypeError(
                'pyrange must be a type or a Range strict subclass')

    @classmethod
    def _from_db(self, name, pyrange, conn_or_curs):
        """Return a `RangeCaster` instance for the type *pgrange*.

        Raise `ProgrammingError` if the type is not found.
        """
        # NOTE: the first argument is conventionally 'cls'; 'self' is kept
        # here only to avoid touching the upstream signature.
        from psycopg2.extensions import STATUS_IN_TRANSACTION
        from psycopg2.extras import _solve_conn_curs
        conn, curs = _solve_conn_curs(conn_or_curs)

        # Range types only exist since PostgreSQL 9.2.
        if conn.server_version < 90200:
            raise ProgrammingError("range types not available in version %s"
                % conn.server_version)

        # Store the transaction status of the connection to revert it after use
        conn_status = conn.status

        # Use the correct schema
        if '.' in name:
            schema, tname = name.split('.', 1)
        else:
            tname = name
            schema = 'public'

        # get the type oid and attributes
        try:
            curs.execute("""\
select rngtypid, rngsubtype,
    (select typarray from pg_type where oid = rngtypid)
from pg_range r
join pg_type t on t.oid = rngtypid
join pg_namespace ns on ns.oid = typnamespace
where typname = %s and ns.nspname = %s;
""", (tname, schema))

        except ProgrammingError:
            if not conn.autocommit:
                conn.rollback()
            raise
        else:
            rec = curs.fetchone()

            # revert the status of the connection as before the command
            if (conn_status != STATUS_IN_TRANSACTION
            and not conn.autocommit):
                conn.rollback()

        if not rec:
            raise ProgrammingError(
                "PostgreSQL type '%s' not found" % name)

        type, subtype, array = rec

        return RangeCaster(name, pyrange,
            oid=type, subtype_oid=subtype, array_oid=array)

    # Matches the text representation of a range value: a bound flag, two
    # optional bounds (quoted or unquoted, or missing = unbound) separated
    # by a comma, and the closing flag.
    _re_range = re.compile(r"""
        ( \(|\[ )                   # lower bound flag
        (?:                         # lower bound:
          " ( (?: [^"] | "")* ) "   #  - a quoted string
          | ( [^",]+ )              #  - or an unquoted string
        )?                          #  - or empty (not catched)
        ,
        (?:                         # upper bound:
          " ( (?: [^"] | "")* ) "   #  - a quoted string
          | ( [^"\)\]]+ )           #  - or an unquoted string
        )?                          #  - or empty (not catched)
        ( \)|\] )                   # upper bound flag
        """, re.VERBOSE)

    # Collapses the doubled escape characters produced inside quoted bounds.
    _re_undouble = re.compile(r'(["\\])\1')

    def parse(self, s, cur=None):
        """Typecast the range literal *s* into a `self.range` instance.

        Bounds are cast to the range's subtype through *cur* when available.
        """
        if s is None:
            return None

        if s == 'empty':
            return self.range(empty=True)

        m = self._re_range.match(s)
        if m is None:
            raise InterfaceError("failed to parse range: '%s'" % s)

        # Prefer the unquoted group (3/5); fall back to the quoted one (2/4)
        # after undoubling escaped characters.
        lower = m.group(3)
        if lower is None:
            lower = m.group(2)
            if lower is not None:
                lower = self._re_undouble.sub(r"\1", lower)

        upper = m.group(5)
        if upper is None:
            upper = m.group(4)
            if upper is not None:
                upper = self._re_undouble.sub(r"\1", upper)

        if cur is not None:
            lower = cur.cast(self.subtype_oid, lower)
            upper = cur.cast(self.subtype_oid, upper)

        bounds = m.group(1) + m.group(6)

        return self.range(lower, upper, bounds)

    def _register(self, scope=None):
        """Register typecasters and adapter; *scope* None means globally."""
        register_type(self.typecaster, scope)
        if self.array_typecaster is not None:
            register_type(self.array_typecaster, scope)

        register_adapter(self.range, self.adapter)
class NumericRange(Range):
    """A `Range` suitable to pass Python numeric types to a PostgreSQL range.

    PostgreSQL types :sql:`int4range`, :sql:`int8range`, :sql:`numrange` are
    cast into `!NumericRange` instances.
    """
    pass
class DateRange(Range):
    """Represents :sql:`daterange` values (bounds are `datetime.date`)."""
    pass
class DateTimeRange(Range):
    """Represents :sql:`tsrange` values (timestamp without time zone)."""
    pass
class DateTimeTZRange(Range):
    """Represents :sql:`tstzrange` values (timestamp with time zone)."""
    pass
# Special adaptation for NumericRange. It allows passing numeric ranges
# regardless of whether the bounds are ints or floats, and of what size the
# ints are -- distinctions that are meaningless in Python. On the way back,
# no numeric range is cast to NumericRange directly, only to its subclasses.
class NumberRangeAdapter(RangeAdapter):
    """Adapt a range if the subtype doesn't need quotes."""
    def getquoted(self):
        rng = self.adapted
        if rng.isempty:
            return b("'empty'")

        # The bounds are numbers: their adapted form needs no quoting and no
        # connection-specific preparation (encoding is irrelevant), so the
        # adapters are used directly.  An unbound side is rendered empty.
        if rng.lower_inf:
            lower = ''
        else:
            lower = adapt(rng.lower).getquoted().decode('ascii')

        if rng.upper_inf:
            upper = ''
        else:
            upper = adapt(rng.upper).getquoted().decode('ascii')

        quoted = "'%s%s,%s%s'" % (
            rng._bounds[0], lower, upper, rng._bounds[1])
        return quoted.encode('ascii')
# TODO: probably won't work with infs, nans and other tricky cases.
register_adapter(NumericRange, NumberRangeAdapter)


# Register globally typecasters and adapters for builtin range types.

# note: the adapter is registered more than once, but this is harmless.

# The oids below are the fixed pg_catalog oids of the builtin range types,
# their array types and their element types (int4=23, int8=20, numeric=1700,
# date=1082, timestamp=1114, timestamptz=1184).
int4range_caster = RangeCaster(NumberRangeAdapter, NumericRange,
    oid=3904, subtype_oid=23, array_oid=3905)
int4range_caster._register()

int8range_caster = RangeCaster(NumberRangeAdapter, NumericRange,
    oid=3926, subtype_oid=20, array_oid=3927)
int8range_caster._register()

numrange_caster = RangeCaster(NumberRangeAdapter, NumericRange,
    oid=3906, subtype_oid=1700, array_oid=3907)
numrange_caster._register()

daterange_caster = RangeCaster('daterange', DateRange,
    oid=3912, subtype_oid=1082, array_oid=3913)
daterange_caster._register()

tsrange_caster = RangeCaster('tsrange', DateTimeRange,
    oid=3908, subtype_oid=1114, array_oid=3909)
tsrange_caster._register()

tstzrange_caster = RangeCaster('tstzrange', DateTimeTZRange,
    oid=3910, subtype_oid=1184, array_oid=3911)
tstzrange_caster._register()
|
UNINETT/PyMetric | refs/heads/master | command.py | 1 | from cmd import Cmd
import readline
import networkx as nx
import sys
import os.path
from pajek import read_pajek
from model import Simulation, Model
from config import Config
from plotting import PlotUI
from textwrap import TextWrapper
from termcolor import colored, colored2, colored3
import utils
class MetricShell(Cmd):
    def __init__(self, model=None, filename="model.net",
                 linkloads=False, debug=False):
        """Set up readline history/completion, the model, the simulation and
        the plotting UI.

        NOTE(review): when a non-None `model` is passed, `self.model` is
        never assigned in this constructor -- confirm that callers always
        pass model=None.
        """
        # Persistent readline history in the user's home directory.
        self.histfile = os.path.join(os.environ["HOME"], ".pymetric-hist")
        try:
            readline.read_history_file(self.histfile)
        except IOError:
            pass
        # libedit (OS X) uses a different binding syntax for tab completion.
        if 'libedit' in readline.__doc__:
            readline.parse_and_bind("bind ^I rl_complete")
        else:
            readline.parse_and_bind("tab: complete")
        self.config = Config()
        readline.set_completer_delims(" ")
        Cmd.__init__(self)
        self.termcolor = True
        self._colormode(self.termcolor)
        self.uiwait = False
        self.defaultprompt = ">>> "
        self.debug = debug
        self.has_plotted = False
        self.filename = filename
        print
        print "Initializing model...."
        if not model:
            self.model = Model(nx.Graph(), self.config)
        self.model.refresh_from_file(self.filename)
        if linkloads:
            self.model.refresh_linkloads()
        self.simulation = Simulation(self.model)
        self.gui = PlotUI(self)
        # Shared text wrapper reused (and re-configured) by the do_* commands.
        self.tw = TextWrapper(initial_indent=' '*4,
                              subsequent_indent=' '*4,
                              width=80)
        self.intro = self.bt("PyMetric interactive shell, type 'help' for help")
        self.prompt = self.defaultprompt
    def cmdloop(self):
        """Run the command loop, surviving Ctrl-C.

        NOTE(review): the loop restarts Cmd.cmdloop() forever; presumably a
        quit/EOF command exits the process directly -- confirm.
        """
        while 1:
            try:
                Cmd.cmdloop(self)
            except KeyboardInterrupt:
                # Swallow Ctrl-C and restart the prompt on a fresh line.
                self.intro = "\n"
                pass
def fromui(self, node):
if not self.uiwait:
self.do_info(node)
def completenames(self, text, *ignored):
dotext = 'do_'+text
return [a[3:] for a in self.get_names() if a.startswith(dotext)]
    def postcmd(self,stop,line):
        """Rebuild the prompt after each command.

        In simulation mode the prompt summarizes the simulation state:
        change count, affected nodes, utilization buckets, path-length /
        radius / diameter deltas, partitioning and anycast groups.
        """
        if self.simulation.is_active():
            promptstring = "(sim) "
            if self.simulation.has_changes():
                effects = self.simulation.get_effects()
                # Nodes whose paths changed towards >=5 / >=20 destinations.
                multinodes = filter(lambda x: len(effects[x].keys()) >= 5,
                                    effects.keys())
                multimulti = filter(lambda x: len(effects[x].keys()) >= 20,
                                    effects.keys())
                # Difference in average shortest path vs the real topology.
                difflen = nx.average_shortest_path_length(self.simulation.graph)\
                          - nx.average_shortest_path_length(self.model.G)
                components = list(nx.connected_component_subgraphs(
                    self.simulation.graph.to_undirected()))
                cnodes = 0
                if len(components) > 1:
                    cnodes = sum([len(g.nodes()) for g in components[1:]])
                # radius/diameter are undefined on disconnected graphs.
                try:
                    diffrad = nx.radius(self.simulation.graph)\
                              - nx.radius(self.model.G)
                except:
                    diffrad = None
                try:
                    diffdia = nx.diameter(self.simulation.graph)\
                              - nx.diameter(self.model.G)
                except:
                    diffdia = None
                # Bucket link utilizations at 50/75/95%; >=100% means drops.
                uzs = None
                uzs_50_cnt = 0
                uzs_75_cnt = 0
                uzs_95_cnt = 0
                drop_warning = False
                if self.model.has_linkloads():
                    uzs = self.simulation.get_link_utilizations()
                    for (u,v) in uzs:
                        if uzs[(u,v)] >= 0.95:
                            uzs_95_cnt += 1
                            if uzs[(u,v)] >= 1:
                                drop_warning = True
                        elif uzs[(u,v)] >= 0.75:
                            uzs_75_cnt += 1
                        elif uzs[(u,v)] >= 0.5:
                            uzs_50_cnt += 1
                promptstring += "(" \
                             + self.pbblt("%dc" % self.simulation.no_changes()) \
                             + "/" \
                             + self.pblt("%d:" % len(effects.keys())) \
                             + self.pblt("%d:" % len(multinodes)) \
                             + self.pblt("%dn" % len(multimulti))
                if uzs:
                    promptstring += "/"
                    promptstring += self.pblt("%d:" % uzs_50_cnt) \
                                 + self.pblt("%d:" % uzs_75_cnt)
                    if uzs_95_cnt:
                        promptstring += self.prt("%du" % uzs_95_cnt)
                    else:
                        promptstring += self.pblt("%du" % uzs_95_cnt)
                if difflen and difflen >= 0.01:
                    promptstring += "/"
                    promptstring += self.pblt("%.2fL" % difflen)
                if diffrad:
                    promptstring += "/"
                    promptstring += self.pblt("%sr" % diffrad)
                if diffdia:
                    promptstring += "/"
                    promptstring += self.pblt("%sd" % diffdia)
                if len(components) > 1:
                    promptstring += "/"
                    promptstring += self.prt("%d:%dp" % (len(components), cnodes))
                    print self.bt("Warning:") + " Network is partitioned"
                promptstring += ") "
                if drop_warning:
                    print self.bt("Warning:") + " There are traffic drops"
            acnodes = self.simulation.get_anycast_nodes()
            if acnodes:
                no_acgroups = len(acnodes)
                groupcounts = [str(len(self.simulation.get_anycast_group(x)))
                               for x in sorted(acnodes)]
                promptstring += "(" \
                             + self.pbgrt("%da" % no_acgroups) \
                             + "/" \
                             + self.pgrt(":".join(groupcounts) + "m") \
                             + ") "
            promptstring += ">>> "
            self.prompt = promptstring
        else:
            self.prompt = self.defaultprompt
        return stop
def emptyline(self):
return
#
# Global commands
#
def do_version(self, args):
if self.version:
print "PyMetric %s" % self.version
else:
print "Unknown version"
return
def do_colors(self, args):
color = self.termcolor
if args:
if args == 'on':
color = True
elif args == 'off':
color = False
else:
print "Unknown argument:", args
return
else:
print "Colormode is currently %s" % self.termcolor
return
self._colormode(color)
print "Colormode is now %s" % self.termcolor
return
    def do_reload(self, args):
        """Re-read the topology from the current file; replot if a plot exists."""
        if self.simulation.is_active():
            print "Please end simulation before reload."
            return
        self.model.refresh_from_file(self.filename)
        if self.gui.has_plotted:
            self.do_plot("")
        return
    def do_linkloads(self, args):
        """Fetch fresh link load data into the model (and the simulation)."""
        if not self.config.get('use_linkloads'):
            print "Link loads are not enabled"
            return
        if self.model.refresh_linkloads():
            print "OK, loads refreshed, use 'plot with-load' to plot"
            # Seed the simulation with the model's loads on first fetch, and
            # recompute simulated loads if changes are pending.
            if not self.simulation.linkloads:
                self.simulation.linkloads = self.model.linkloads
            if self.simulation.has_changes():
                self.simulation._refresh_linkload()
            return
        print "Couldn't refresh load data"
        return
    def do_load(self, args):
        """Load a new topology file after confirmation (not during simulation).

        On any failure the previous model is restored from self.filename.
        """
        if self.simulation.is_active():
            print "Please end simulation before loading new file."
            return
        if not args:
            print "Must specify filename to load model from."
            return
        confirm = raw_input('Are you sure you want to load new model [Y/N]? ')
        if not confirm.lower() == 'y':
            return
        try:
            self.model.refresh_from_file(args)
            self.filename = args
        except IOError:
            print "ERROR: Could not read file, does it exist?"
            self.model.refresh_from_file(self.filename)
        except:
            # Deliberate catch-all: report any parse error and fall back to
            # the previous model so the shell stays usable.
            import traceback
            traceback.print_exc()
            print
            print "ERROR: Load failed, model not changed."
            self.model.refresh_from_file(self.filename)
        return
def do_utilizations(self, args):
if self.simulation.is_active():
model = self.simulation
else:
model = self.model
if not self.model.has_linkloads():
print "No link load information available. Use 'linkloads' to fetch."
return
utils = model.get_link_utilizations()
sorted_utils = sorted(utils,
cmp=lambda x, y: cmp(utils[x], utils[y]))
sorted_utils.reverse()
ab75_utils = filter(lambda x: utils[x] >= 0.75, sorted_utils)
print "Utilizations, type 'linkinfo source dest' for more details"
print
if ab75_utils:
print ">75% utilization:"
for (u,v) in ab75_utils:
print " * %s->%s: %.2f%%" % (u,v, utils[(u,v)]*100)
print
print "Top 10:"
for (u,v) in sorted_utils[:10]:
print " * %s->%s: %.2f%%" % (u,v, utils[(u,v)]*100)
print
def do_list(self, args):
print "List of nodes:"
print sorted(self.model.get_nodes())
    def do_asymmetric(self, args):
        """List all links whose two directions carry different metrics."""
        model = self.model
        G = model.G
        if self.simulation.is_active():
            model = self.simulation
            G = model.graph
        uneven = model.uneven_metrics()
        # Track printed pairs so each bidirectional link is reported once.
        printed = {}
        if not uneven:
            print "All link metrics are symmetric"
            return
        # uneven holds both directions, hence the /2 in the count.
        print "Links with asymmetric metrics (%d):" \
              % (len(uneven)/2)
        print
        for (u,v,w) in uneven:
            w = w['weight']
            x = G[v][u]['weight']
            if (v,u) in printed: continue
            print "%-15s -> %s: %s" % (u,v,w)
            print "%-15s -> %s: %s" % (v,u,x)
            printed[(u,v)] = True
        print
        return
    def do_eval(self, args):
        """Evaluate a raw Python expression in the shell's context (debug aid).

        SECURITY: this exec()s arbitrary user input by design; acceptable
        only because the shell is a local, interactive operator tool --
        never expose it to untrusted input.
        """
        if not args:
            return
        retstr = ""
        evalstr = "retstr=%s" % args
        try:
            exec(evalstr)
        except:
            import traceback
            print "An error occured:"
            traceback.print_exc()
            return
        print retstr
        return
    def do_simplot(self, args):
        """Plot the simulated topology.

        Optional arguments: 'with-load' colors edges by simulated
        utilization/capacity; 'all-metrics' also shows default metric labels.
        Falls through to the anycast plot when anycast nodes exist and no
        load view was requested.
        """
        if not self.simulation.is_active():
            print "No simulation is active, type 'simulation start' to start one"
            return
        suppress_default_metric = True
        subargs = args.split()
        cmap=False
        capa=False
        if 'with-load' in subargs:
            if self.simulation.linkloads:
                cmap = {}
                capa = {}
                for (u,v) in self.simulation.graph.edges():
                    cmap[(u,v)] = self.simulation.get_link_utilization(u,v)
                    capa[(u,v)] = self.model.get_link_capacity(u,v)
            else:
                print "Warning: No linkloads are defined. Use 'linkloads' to update."
        elif self.simulation.get_anycast_nodes():
            self.do_anycast("")
            return
        if 'all-metrics' in subargs:
            suppress_default_metric = False
        self.gui.clear()
        graphdata = {}
        graphdata['nodegroups'] = self.simulation.get_node_groups()
        graphdata['edgegroups'] = self.simulation.get_edge_groups()
        G = self.simulation.graph
        graphdata['labels'] = utils.short_names(G.nodes())
        graphdata['edgelabels'] = utils.edge_labels(G.edges(data=True),
                                                    graphdata['edgegroups'],
                                                    suppress_default_metric)
        graphdata['pos'] = self.model.get_positions(G.nodes())
        graphdata['title'] = "Simulated topology"
        if cmap:
            graphdata['title'] += " - Loads and Capacity view"
        self.gui.plot(G, graphdata, edge_cmap=cmap, edge_capa=capa)
    def do_png(self, args):
        """Save the current (or simulated) topology plot to a PNG file.

        Usage: png [filename] [with-load]
        """
        fname = "isis-metrics.png"
        subargs = args.split()
        plotarg = ""
        load = False
        if len(subargs) >= 1: fname = subargs[0]
        if len(subargs) == 2 and subargs[1] == 'with-load':
            plotarg = "with-load"
            load = True
        # Re-plot first so the saved figure reflects the current state.
        if self.simulation.is_active():
            self.do_simplot(plotarg)
        else:
            self.do_plot(plotarg)
        self.gui.savefig(fname, load)
        return
    def do_plot(self, args):
        """Plot the current topology.

        Optional arguments: 'simulation' delegates to simplot when active;
        'with-load' colors edges by utilization/capacity; 'all-metrics' also
        shows default metric labels.
        """
        self.gui.clear()
        subargs = args.split()
        if "simulation" in subargs and self.simulation.is_active():
            self.do_simplot(args)
            return
        suppress_default_metric = True
        cmap = False
        capa = False
        if 'with-load' in subargs:
            if self.model.linkloads:
                cmap = {}
                capa = {}
                for (u,v) in self.model.graph.edges():
                    cmap[(u,v)] = self.model.get_link_utilization(u,v)
                    capa[(u,v)] = self.model.get_link_capacity(u,v)
            else:
                print "Warning: No linkloads are defined. Use 'linkloads' to update."
        if 'all-metrics' in subargs:
            suppress_default_metric = False
        graphdata = {}
        G = self.model.G
        graphdata['nodegroups'] = self.model.get_node_groups()
        graphdata['edgegroups'] = self.model.get_edge_groups()
        graphdata['labels'] = utils.short_names(G.nodes())
        graphdata['edgelabels'] = utils.edge_labels(G.edges(data=True),
                                                    graphdata['edgegroups'],
                                                    suppress_default_metric)
        graphdata['pos'] = self.model.get_positions(G.nodes())
        graphdata['title'] = "Current topology"
        if cmap:
            graphdata['title'] += " - Loads and Capacity view"
        self.gui.plot(G, graphdata, edge_cmap=cmap, edge_capa=capa)
    def do_areaplot(self, args):
        """Plot the current topology grouped by IS-IS areas (if enabled)."""
        if not self.config.get('use_areas'):
            print "IS-IS areas are not enabled"
            return
        G = self.model.G
        areas = self.model.get_areas(G.nodes())
        if not areas:
            print "No IS-IS areas known"
            return
        self.gui.clear()
        graphdata = {}
        graphdata['nodegroups'] = self.model.get_node_groups()
        graphdata['areagroups'] = areas
        graphdata['edgegroups'] = self.model.get_edge_groups()
        graphdata['labels'] = utils.short_names(G.nodes())
        graphdata['edgelabels'] = utils.edge_labels(G.edges(data=True),
                                                    graphdata['edgegroups'])
        graphdata['pos'] = self.model.get_positions(G.nodes())
        graphdata['title'] = "Current topology with IS-IS areas"
        self.gui.plot(G, graphdata, areagroups=True)
    def do_stats(self, args):
        """Print summary statistics for the model (or the active simulation)."""
        model = self.model
        stats = self.model.get_stats()
        if self.simulation.is_active():
            model = self.simulation
            stats = model.get_stats()
        # Reconfigure the shared wrapper for the value column; restore width
        # afterwards.
        self.tw.initial_indent=''
        self.tw.subsequent_indent=' '*18
        self.tw.width=80-18
        for (name, value) in sorted(stats.items()):
            if type(value) == type([]):
                value = ", ".join(value)
            value = str(value)
            print "%s: %s" % (name.rjust(16), self.tw.fill(value))
        self.tw.width=80
def do_linkinfo(self, args):
model = self.model
if self.simulation.is_active():
model = self.simulation
transit_info = False
subargs = args.split()
if not len(subargs) >= 2:
print "Must specify two nodes"
return
self.help_linkinfo()
(u,v) = subargs[:2]
if not model.graph.has_edge(u,v):
print "Model has no link (%s,%s)" % (u,v)
return
if len(subargs) == 3:
if subargs[2] == 'with-transit':
transit_info = True
self.tw.initial_indent = ''
self.tw.subsequent_indent = ' '*17
self.tw.width = 80 - 17
print "Information for link (%s,%s):" % (u,v)
infohash = model.get_link_info(u,v)
for key in ['name', 'betweenness',
'capacity', 'load', 'utilization']:
if key not in infohash:
continue
info = infohash[key]
if type(info) == type([]):
info = ', '.join(info)
info = str(info)
print "%-15s: %s" % (key, self.tw.fill(info))
print
if transit_info:
transitinfo = model.get_transit_links(u,v)
if not transitinfo:
print "No paths are using this link as a transit link."
self.tw.width = 80
return
transit_by_start_node = {}
print "Paths using (%s, %s) as transit link:" % (u,v)
for (start_node, end_node) in transitinfo:
if not start_node in transit_by_start_node:
transit_by_start_node[start_node] = [end_node]
else:
transit_by_start_node[start_node].append(end_node)
self.tw.subsequent_indent = ' '*24
self.tw.width= 80
for start_node in sorted(transit_by_start_node):
print " * %-16s -> %s" % (start_node, self.tw.fill(", ".join(sorted(transit_by_start_node[start_node]))))
self.tw.width = 80
def do_info(self, args):
model = self.model
if self.simulation.is_active():
model = self.simulation
if not args:
print "Must specify a node"
return
self.help_info()
if args not in model.graph.nodes():
print "%s is not a valid node name" % args
return
self.tw.initial_indent = ''
self.tw.subsequent_indent = ' '*17
self.tw.width = 80 - 17
print "Information for node %s:" % (args)
infohash = model.get_node_info(args)
for key in ['name', 'degree', 'eccentricity',
'betweenness', 'neighbors', 'links',
'longest paths', 'anycast group']:
if key not in infohash:
continue
info = infohash[key]
if type(info) == type([]):
info = ', '.join(info)
info = str(info)
print "%-15s: %s" % (key, self.tw.fill(info))
print
self.tw.width = 80
    def do_simulation(self, args):
        """Enter simulation mode; leave it with 'simulation stop'."""
        if args == 'stop':
            if self.simulation.is_active():
                print self.bt("Simulation ended")
                self.simulation.stop()
            else:
                print "No simulation is active, type 'simulation start' to start one"
        else:
            if self.simulation.is_active():
                print "Simulation allready in progress, type 'stop' to end it."
                return
            print self.bt("Simulation mode active, type 'stop' or 'simulation stop' to end it")
            self.simulation.start()
    def do_undo(self, args):
        """Undo a simulated change by number, or 'undo all' for everything."""
        if not self.simulation.is_active():
            print "No simulation is active, type 'simulation start' to start one"
            return
        if not args:
            print "Must supply a change (number) or 'all'"
            return
        if args and args == 'all':
            # Always undo change #1: the list shrinks as changes are removed.
            while self.simulation.has_changes():
                self.simulation.undo(1)
            return
        try:
            change_no = int(args)
        except:
            print "Please supply a valid change number (integer)"
            return
        if not self.simulation.undo(change_no):
            print "No such change, type 'changes' to see all changes"
            return
        print "Done"
        return
    def do_stop(self, args):
        """Shorthand for 'simulation stop'."""
        if not self.simulation.is_active():
            print "No simulation is active, type 'simulation start' to start one"
            return
        self.do_simulation("stop")
    def do_changes(self, args):
        """List simulated changes and summarize their routing effects.

        Optional arguments: 'as-commands' echoes the changes as replayable
        commands; 'no-effects' skips the effect summary; a node name limits
        the output to that node's per-destination path changes.
        """
        if not self.simulation.is_active():
            print "No simulation is active, type 'simulation start' to start one"
            return
        if not self.simulation.has_changes():
            print "No changes"
            return
        print "Simulated changes:"
        for (i, change) in enumerate(self.simulation.get_changes_strings()):
            print " %d: %s" % (i+1, change)
        print
        subargs = args.split()
        if 'as-commands' in subargs:
            print "As commands:"
            for cmd in self.simulation.get_changes_strings(commands=True):
                print " %s" % (cmd)
            print
        if 'no-effects' in subargs:
            return
        if not self.simulation.has_effects():
            print "No effect on model"
            return
        print "Effects of changes:"
        # Per-node detail view: show changed paths for the first node name
        # found among the arguments, then stop.
        for arg in subargs:
            if arg not in self.simulation.get_nodes(): continue
            nodechanges = self.simulation.get_effects_node(arg)
            if not nodechanges.keys(): print " * No changes for %s " % (arg)
            print " * Details for %s " % (arg)
            print
            self.tw.initial_indent=''
            self.tw.subsequent_indent=' '*17
            self.tw.width=80-17
            for dest in nodechanges.keys():
                ddiffs = nodechanges[dest]
                for diff in ddiffs:
                    if not diff['new']:
                        print " - %s now unreachable" % (dest)
                        print " Was reachable via %s" % ("/".join(diff['old']))
                    else:
                        print " - Path to %s now via %s" % (dest,
                                                            "/".join(diff['new']))
                        print " instead of %s" % ("/".join(diff['old']))
                print
            return
        print " * Affects %d nodes total" % (len(self.simulation.get_effects()))
        print
        print " * Summary:"
        srcsummary, dstsummary = self.simulation.get_effects_summary()
        # Sources with many changed destinations are reported on their own;
        # they are removed from the per-destination summary to avoid
        # double-reporting.
        for src in sorted(srcsummary):
            count = len(srcsummary[src])
            if count > 3:
                for dest in dstsummary:
                    if src in dstsummary[dest]:
                        del dstsummary[dest][dstsummary[dest].index(src)]
                print " %s changed path to %d destinations" % (src, count)
                if count < 6:
                    print " => %s" % (sorted(srcsummary[src]))
                    print
                else:
                    print
        for dest in sorted(dstsummary):
            count = len(dstsummary[dest])
            if count == 0: continue
            if count < 3:
                print " %s changed path to %s" \
                      % (" and ".join(sorted(dstsummary[dest])), dest)
            else:
                print " %d sources changed path to %s" % (count, dest)
            if count > 3 and count < 6:
                print " => %s" % (sorted(dstsummary[dest]))
                print
            else:
                print
def do_anycast(self, args):
if not self.simulation.is_active():
print "Must be in simulation mode to model anycast"
return
subargs = args.split()
if not subargs:
acnodes = self.simulation.get_anycast_nodes()
if not acnodes:
print "No anycast nodes configured in current simulation."
else:
print "Current anycast nodes:"
for node in acnodes:
members = self.simulation.get_anycast_group(node)
print " * %-15s (%s members)" \
% (node, str(len(members)).rjust(2))
self.gui.clear()
graphdata = {}
G = self.simulation.graph
acgroups = self.simulation.get_anycast_groups_by_source()
graphdata['nodegroups'] = self.simulation.get_node_groups()
graphdata['acnodes'] = acnodes
graphdata['acgroups'] = acgroups
graphdata['edgegroups'] = self.simulation.get_edge_groups()
graphdata['labels'] = utils.short_names(G.nodes())
graphdata['edgelabels'] = utils.edge_labels(G.edges(data=True),
graphdata['edgegroups'])
graphdata['pos'] = self.model.get_positions(G.nodes())
graphdata['title'] = "Simulated topology with anycast groups"
self.gui.plot(G, graphdata, anycast=True)
return
elif len(subargs) == 1:
if subargs[0] == 'clear':
self.simulation.remove_anycast_nodes(self.simulation.get_anycast_nodes())
return
else:
print "Invalid input"
return
self.help_anycast()
elif len(subargs) > 1:
if subargs[0] == 'add':
for node in subargs[1:]:
if not node in self.simulation.graph.nodes():
print "Invalid node: %s" % node
return
self.simulation.add_anycast_nodes(subargs[1:])
elif subargs[0] == 'remove':
for node in subargs[1:]:
if not node in self.simulation.graph.nodes():
print "Invalid node: %s" % node
return
self.simulation.remove_anycast_nodes(subargs[1:])
else:
print "Invalid input"
return
self.help_anycast()
def do_reroute(self, args):
if not self.simulation.is_active():
print "Must be in simulation mode to run reroute"
return
subargs = args.split()
if not len(subargs) >= 3:
print "Invalid input"
self.help_reroute()
return
equal = False
start, end, via = subargs[:3]
if len(subargs) == 4:
if subargs[3] == 'equal-path':
equal = True
else:
print "Warning: Last argument (%s) ignored" % subargs[3]
ret = self.simulation.reroute(start, end, via, equal)
if not ret[0]:
print "No solution could be found.."
return
if not ret[1]:
print "The path allready goes through %s" % via
return
print "The following metric changes are suggested:"
G = self.simulation.graph
shown = {}
for e in sorted(ret[1]):
u,v,w = e[0], e[1], ret[1][e]['weight']
if (u,v) in shown: continue
w_old = G[u][v]['weight']
if w_old != w:
linkstr = "%s <-> %s" % (u,v)
oldstr = "%s" % int(w_old)
newstr = "%s" % int(w)
print "%-40s %s -> %s" % (linkstr, oldstr.rjust(2),
newstr.rjust(2))
shown[(u,v)] = True
shown[(v,u)] = True
apply = raw_input("Apply changes to current simulation (Y/N)? ")
applied = {}
if apply.lower() == 'y':
for e in ret[1]:
u,v,w = e[0], e[1], ret[1][e]
if (u,v) in applied: continue
w_old = G[u][v]['weight']
if w_old != w:
self.simulation.change_metric(u,v,w, True)
applied[(u,v)] = True
applied[(v,u)] = True
else:
print "Not applied."
return
def do_minimize(self, args):
    """Suggest (and optionally apply) the smallest link metrics that
    preserve every shortest path of the simulated model."""
    if not self.simulation.is_active():
        print "Must be in simulation mode to run minimize"
        return
    G = self.simulation.graph
    print "Please wait, this can take a little while..."
    # H: copy of the topology with minimized link costs.
    H = self.simulation.minimal_link_costs()
    shown = {}
    header = False
    for (u,v,w) in sorted(H.edges(data=True)):
        if (u,v) in shown: continue
        # Rebind w: the tuple element is the edge attribute dict, the
        # actual metric lives under 'weight'.
        w = H[u][v]['weight']
        w_old = G[u][v]['weight']
        if w_old != w:
            if not header:
                # Print the header lazily, only once a change exists.
                print "The following metric changes are suggested:"
                header = True
            linkstr = "%s <-> %s" % (u,v)
            oldstr = "%s" % int(w_old)
            newstr = "%s" % int(w)
            print "%-40s %s -> %s" % (linkstr, oldstr.rjust(2),
                                      newstr.rjust(2))
        # Record both directions so each link is handled only once.
        shown[(u,v)] = True
        shown[(v,u)] = True
    # NOTE: 'apply' shadows the builtin; harmless here but easy to trip on.
    apply = raw_input("Apply changes to current simulation (Y/N)? ")
    applied = {}
    if apply.lower() == 'y':
        for (u,v,w) in H.edges(data=True):
            if (u,v) in applied: continue
            w = H[u][v]['weight']
            w_old = G[u][v]['weight']
            if w_old != w:
                # Trailing True presumably applies the change two-way, like
                # bidir= in do_metric — TODO confirm change_metric()'s
                # positional signature.
                self.simulation.change_metric(u,v,w, True)
            applied[(u,v)] = True
            applied[(v,u)] = True
    else:
        print "Not applied."
    return
def do_metric(self, args):
    """Change the metric of a link in the simulated topology.

    Syntax: metric <src> <dst> <value> (one-way|two-way)
    """
    if not self.simulation.is_active():
        print "Must be in simulation mode to run metric changes"
        return
    if not args:
        print "Invalid input"
        self.help_metric()
        return
    subargs = args.split()
    if not len(subargs) >= 3:
        print "Invalid input"
        self.help_metric()
        return
    # bidir=None leaves the direction decision to the simulation's default.
    bidir = None
    (n1, n2, metric) = subargs[0:3]
    if len(subargs) == 4:
        if subargs[3] == 'one-way':
            bidir = False
        elif subargs[3] == 'two-way':
            bidir = True
        else:
            print "Warning: last argument ignored: %s" % (subargs[3])
    if self.simulation.changes:
        # Undo any earlier metric change on the same pair so the new value
        # replaces it rather than stacking on top of it.
        # NOTE(review): undo() is called while enumerating
        # simulation.changes; if undo() mutates that list the remaining
        # indices shift — confirm Simulation.undo()'s semantics.
        for i, change in enumerate(self.simulation.changes):
            if change['type'] == Simulation.SC_METRIC \
               and change['pair'] == (n1,n2):
                self.simulation.undo(i+1)
    if not self.simulation.change_metric(n1, n2, metric,bidir=bidir):
        print "No link from %s to %s" % (n1, n2)
def do_linkfail(self, args):
if not self.simulation.is_active():
print "Must be in simulation mode to run link changes"
return
if not args:
print "Invalid input"
self.help_linkfail()
return
subargs = args.split()
if not len(subargs) == 2:
print "Invalid input"
self.help_linkfail()
return
(n1, n2) = subargs[0:2]
if not self.simulation.linkfail(n1, n2):
print "No link from %s to %s" % (n1, n2)
def do_routerfail(self, args):
if not self.simulation.is_active():
print "Must be in simulation mode to run router changes"
return
if not args:
print "Invalid input"
self.help_routerfail()
return
subargs = args.split()
if not len(subargs) == 1:
print "Invalid input"
self.help_routerfail()
return
n1 = subargs[0]
if not self.simulation.routerfail(n1):
print "No node %s in current topology."
def do_simpath(self, args):
if not self.simulation.is_active():
print "No simulation active, type 'simulation' to start one"
return
subargs = args.split()
if not len(subargs) == 2:
self.help_simpath()
return
a, b = subargs[0], subargs[1]
if a not in self.simulation.get_nodes():
print "%s not a valid node, type 'list' to see nodes"\
% (a)
return
if b not in self.simulation.get_nodes():
print "%s not a valid node, type 'list' to see nodes"\
% (b)
length, paths = self.simulation.path(a,b)
if not length and not paths:
print "No valid path from %s to %s in model"
return
print "Path from %s to %s:"\
% (a, b)
print " * Cost: %d" % (length)
if len(paths) > 1:
print " * %d paths total (equal cost):" % len(paths)
print
self.tw.subsequent_indent = ' '*5
for path in paths:
print " * Hops: %d" % (len(path))
print " * Path:\n", self.tw.fill(" => ".join(path))
print " * Slowest link: ", self.model.get_path_capacity(path,
True,
True)
print
graphdata = {}
self.gui.clear()
G = self.simulation.graph
path = paths[0]
hops = str(len(paths[0]))
if len(paths) > 1:
path = reduce(lambda x,y: x+y, paths)
hops = "/".join(map(lambda x: str(len(x)), paths))
graphdata['nodegroups'] = self.simulation.get_node_groups(path=path)
graphdata['edgegroups'] = self.simulation.get_edge_groups(path=paths)
graphdata['labels'] = utils.short_names(G.nodes())
graphdata['edgelabels'] = utils.edge_labels(G.edges(data=True),
graphdata['edgegroups'])
graphdata['pos'] = self.model.get_positions(G.nodes())
graphdata['title'] = "Simulated path from %s to %s (cost: %d, %s hops)" \
% (a, b, length, hops)
self.gui.plot(G, graphdata, 0.7)
def do_diffpath(self, args):
if not self.simulation.is_active():
print "No simulation active, type 'simulation' to start one"
return
subargs = args.split()
if not len(subargs) == 2:
self.help_diffpath()
return
a, b = subargs[0], subargs[1]
if a not in self.simulation.get_nodes():
print "%s not a valid node, type 'list' to see nodes"\
% (a)
return
if b not in self.simulation.get_nodes():
print "%s not a valid node, type 'list' to see nodes"\
% (b)
slength, spaths = self.simulation.path(a,b)
length, paths = self.model.path(a,b)
print "Path from %s to %s:"\
% (a, b)
if not length:
print "Path does not exist in model."
return
if not slength:
print "Path no longer possible in simulation."
print "Type 'path %s %s' to see original path." % (a,b)
return
shops = str(len(spaths[0]))
hops = str(len(paths[0]))
spath = spaths[0]
path = paths[0]
if len(spaths) > 1:
spath = reduce(lambda x,y: x+y, spaths)
shops = "/".join(map(lambda x: str(len(x)), spaths))
if len(paths) > 1:
path = reduce(lambda x,y: x+y, paths)
hops = "/".join(map(lambda x: str(len(x)), paths))
if len(spaths) > 1 or len(paths) > 1:
print " * %d vs %d paths total:" % (len(spaths), len(paths))
print
print " * Cost: %d vs %d" % (slength, length)
print " * Hops: %s vs %s" % (shops, hops)
self.tw.initial_indent = ' '*5
self.tw.subsequent_indent = ' '*5
for i in range(max(len(paths), len(spaths))):
if i < len(spaths):
print " * Path:\n", self.tw.fill(" => ".join(spaths[i]))
print " * Slowest link: ", self.model.get_path_capacity(spaths[i],
True,
True)
else:
print " * NA"
print " vs."
if i < len(paths):
print "%s" % self.tw.fill(" => ".join(paths[i]))
print " * Slowest link: ", self.model.get_path_capacity(paths[i],
True,
True)
else:
print " NA"
print
graphdata = {}
self.gui.clear()
G = self.simulation.graph
H = self.model.G
gng = self.simulation.get_diff_node_groups(path,spath)
geg = self.simulation.get_diff_edge_groups(paths,spaths)
lb = utils.short_names(H.nodes())
elb = utils.edge_labels(H.edges(data=True), geg)
pos = self.model.get_positions(H.nodes())
graphdata['title'] = "Simulated path from %s to %s (cost: %d, %s hops)" \
% (a, b, slength, shops)
graphdata['nodegroups'] = gng
graphdata['edgegroups'] = geg
graphdata['labels'] = lb
graphdata['edgelabels'] = elb
graphdata['pos'] = pos
self.gui.plot(H, graphdata, 0.7)
def do_listequal(self, args):
    """List every (source, destination) pair with equal-cost paths."""
    # Use the simulated model when a simulation is running.
    model = self.model
    if self.simulation.is_active():
        model = self.simulation
    nodes = model.get_nodes()
    # equal: source -> [(dest, number_of_paths), ...]
    equal = {}
    for source in nodes:
        for dest in nodes:
            if source == dest: continue
            length, paths = model.path(source, dest)
            if len(paths) > 1:
                if source in equal:
                    equal[source].append((dest, len(paths)))
                else:
                    equal[source] = [(dest, len(paths))]
    if not equal:
        print "No equal-cost paths found in model."
        return
    print "Equal-cost paths ('path source dest' for details):"
    self.tw.initial_indent = ' '*2
    self.tw.subsequent_indent = ' '*25
    self.tw.width=80-23
    for source,dests in sorted(equal.items()):
        dststr = []
        for d in sorted(dests):
            appstr = ""
            if d[1] > 2:
                # Append the path count only when more than two paths exist.
                appstr = " (%s)" % d[1]
            dststr.append("%s%s" % (d[0], appstr))
        print " * %-15s -> %s" % (source, self.tw.fill(", ".join(dststr)))
    return
def do_path(self, args):
subargs = args.split()
if not len(subargs) == 2:
self.help_path()
return
a, b = subargs[0], subargs[1]
if a not in self.model.get_nodes():
print "%s not a valid node, type 'list' to see nodes"\
% (a)
return
if b not in self.model.get_nodes():
print "%s not a valid node, type 'list' to see nodes"\
% (b)
return
length, paths = self.model.path(a,b)
if not length and not paths:
print "No valid path from %s to %s in model"
return
print "Path from %s to %s:"\
% (a, b)
print " * Cost: %d" % (length)
if len(paths) > 1:
print " * %d paths total (equal cost):" % len(paths)
#if len(paths) != len(selection):
# print " => %d path(s) preferred" % (len(selection))
print
self.tw.initial_indent = ' '*5
self.tw.subsequent_indent = ' '*6
for path in paths:
print " * Hops: %d" % (len(path))
print " * Path:\n", self.tw.fill(" => ".join(path))
print " * Slowest link: ", self.model.get_path_capacity(path,
True,
True)
#if len(paths) != len(selection) and path in selection:
# print " <<preferred path>>"
print
self.gui.clear()
graphdata = {}
path = paths[0]
hops = str(len(paths[0]))
if len(paths) > 1:
path = reduce(lambda x,y: x+y, paths)
hops = "/".join(map(lambda x: str(len(x)), paths))
G = self.model.G
graphdata['nodegroups'] = self.model.get_node_groups(path=path)
graphdata['edgegroups'] = self.model.get_edge_groups(path=paths)
graphdata['labels'] = utils.short_names(G.nodes())
graphdata['edgelabels'] = utils.edge_labels(G.edges(data=True),
graphdata['edgegroups'])
graphdata['pos'] = self.model.get_positions(G.nodes())
graphdata['title'] = "Path(s) from %s to %s (cost: %d, %s hops)" \
% (a, b, length, hops)
self.gui.plot(G, graphdata, 0.7)
def do_sim(self, args):
    # Short alias for the 'simulation' command (help text in help_sim).
    return self.do_simulation(args)
def do_help(self, args):
    # With an argument, defer to Cmd's per-command help; without one,
    # print the general overview followed by the command list.
    if args:
        Cmd.do_help(self, args)
        return
    # Build the command list from the help_* methods, left-justified so
    # the textwrapped output forms columns.
    cmdlist = map(lambda x: x.ljust(12),
                  map(lambda x: x.replace('help_', ''),
                      filter(lambda x: x.startswith('help_'), dir(self))))
    print """
The program allows you to view topology and metrics, and to
simulate various changes to investigate their impact on the
routing.
To view the current topology use 'plot'. To trace the path
between two nodes use 'path A B'.
In order to make changes, make sure you enter
simulation mode first. This is done with the 'simulation'
(or just 'sim') command.
When in simulation mode you can view the current topology and trace paths
with 'simplot' and 'simpath' respectivly. You can view the
effects of the simulation with the 'changes' command.
If you want a graphical comparison of paths, use the 'diffpath' command
while in simulation mode.
More help is available per command, just type 'help <command>'.
Available commands:
=================="""
    cmdstring = " ".join(sorted(cmdlist))
    self.tw.initial_indent = ''
    self.tw.subsequent_indent = ''
    print self.tw.fill(cmdstring)
    print
#
# Completions
#
def complete_path(self, text, line, begidx, endidx):
    """Tab-complete node names; for link commands, complete the second
    argument from the source node's neighbors only."""
    if self.debug:
        print "text:", text
        print "line:", line
    tokens = line.split()
    length = len(tokens)
    # Complete against the simulated topology when a simulation runs.
    model = self.model
    if self.simulation.is_active():
        model = self.simulation
    if tokens[0] in ['metric', 'linkfail', 'linkinfo'] and \
       ((length == 2 and not text) or length >= 3):
        # The cursor is on the destination argument: only the neighbors
        # of the already-typed source node are valid.
        startnode = tokens[1]
        return filter(lambda x: x.startswith(text),
                      model.graph.neighbors(startnode))
    return filter(lambda x: x.startswith(text),
                  model.graph.nodes())
def complete_colors(self, text, line, begidx, endidx):
    """Tab-complete the argument of 'colors'.

    BUGFIX: filter the candidates by the typed prefix like every other
    completer does, instead of always returning both options.
    """
    return [opt for opt in ['on', 'off'] if opt.startswith(text)]
def complete_simpath(self, text, line, begidx, endidx):
    # Same completion rules as 'path'.
    return self.complete_path(text, line, begidx, endidx)
def complete_diffpath(self, text, line, begidx, endidx):
    # Same completion rules as 'path'.
    return self.complete_path(text, line, begidx, endidx)
def complete_metric(self, text, line, begidx, endidx):
    """Tab-complete 'metric <src> <dst> <value> (one-way|two-way)'."""
    ntokens = len(line.split())
    directions = ['one-way', 'two-way']
    if ntokens == 3 and not text:
        # Cursor sits on the metric value: nothing sensible to suggest.
        return []
    if ntokens == 4 and not text:
        return directions
    if ntokens == 5:
        return [d for d in directions if d.startswith(text)]
    # Otherwise we are completing one of the node arguments.
    return self.complete_path(text, line, begidx, endidx)
def complete_linkfail(self, text, line, begidx, endidx):
    # Node-pair completion; complete_path restricts the second argument
    # to neighbors of the first for 'linkfail'.
    return self.complete_path(text, line, begidx, endidx)
def complete_routerfail(self, text, line, begidx, endidx):
    # 'routerfail' takes a node name.
    return self.complete_path(text, line, begidx, endidx)
def complete_changes(self, text, line, begidx, endidx):
    """Tab-complete the 'changes' option keywords plus node names."""
    # Removed the unused local 'length' (was computed but never read).
    return filter(lambda x: x.startswith(text), ['as-commands', 'no-effects']) \
           + self.complete_path(text, line, begidx, endidx)
def complete_stats(self, text, line, begidx, endidx):
    # 'stats' takes no arguments.
    return []
def complete_plot(self, text, line, begidx, endidx):
    """Tab-complete the optional 'plot' flags."""
    options = ['with-load', 'all-metrics']
    return [opt for opt in options if opt.startswith(text)]
def complete_simplot(self, text, line, begidx, endidx):
    # Same option flags as 'plot'.
    return self.complete_plot(text, line, begidx, endidx)
def complete_info(self, text, line, begidx, endidx):
    # 'info' takes a node name.
    return self.complete_path(text, line, begidx, endidx)
def complete_linkinfo(self, text, line, begidx, endidx):
    """Tab-complete 'linkinfo <src> <dst> (with-transit)'."""
    ntokens = len(line.split())
    if ntokens == 3 and not text:
        return ['with-transit']
    if ntokens == 4:
        return [opt for opt in ['with-transit'] if opt.startswith(text)]
    # Otherwise complete the source/destination node arguments.
    return self.complete_path(text, line, begidx, endidx)
def complete_reroute(self, text, line, begidx, endidx):
    """Tab-complete 'reroute <A> <B> <C> (equal-path)'."""
    length = len(line.split())
    if length == 4 and not text:
        return ['equal-path']
    elif length == 5:
        return filter(lambda x: x.startswith(text), ['equal-path'])
    else:
        # Otherwise complete one of the three node arguments.
        return self.complete_path(text, line, begidx, endidx)
def complete_anycast(self, text, line, begidx, endidx):
    """Tab-complete 'anycast (add|remove|clear) <node1 node2 ...>'."""
    tokens = line.split()
    length = len(tokens)
    if length == 1 and not text:
        return ['add ', 'remove ', 'clear']
    elif length == 2 and text:
        return filter(lambda x: x.startswith(text),
                      ['add ', 'remove ', 'clear'])
    elif length >= 2:
        if tokens[1] == 'clear':
            # 'clear' takes no further arguments.
            return []
        if tokens[1] == 'remove':
            # Only currently registered anycast nodes can be removed.
            return filter(lambda x: x.startswith(text),
                          self.simulation.get_anycast_nodes())
        # 'add': any node in the topology is a candidate.
        return self.complete_path(text, line, begidx, endidx)
#
# Help-methods
#
def help_version(self):
print """
Usage: version
Display the program version.
"""
def help_colors(self):
print """
Usage: colors (on|off)
Display or set current color-setting.
When set to off no ANSI colors will be used
in the terminal output.
"""
def help_path(self):
print """
Usage: path A B
Display shortest path from node A to node B.
Alternate equal-cost paths will be shown
with dashed lines.
"""
def help_simpath(self):
print """
Usage: simpath A B
Display shortest path from node A to node B
given the current simulated changes.
Alternate equal-cost paths will be shown
with dashed lines.
"""
def help_diffpath(self):
print """
Usage: diffpath A B
Display shortest path from node A to node B
given the current simulated changes, and show
the difference compared to the original path.
The original path will be drawn in light
yellow, while nodes and paths common to both
will be drawn with a dark blue.
Alternate equal-cost paths will be shown
with dashed lines.
"""
def help_reroute(self):
print """
Usage: reroute A B C (equal-path)
Try to find a suitable set of metric changes
to make the shortest path from A to B go
through C. Works on the current simulated model.
If the equal-path option is given the result will
contain multiple cost-equal paths if possible.
"""
def help_anycast(self):
print """
Usage: anycast (add|remove|clear) (<node1 node2 ...>)
Add or remove nodes as anycast nodes.
When no argument is given list the current anycast
nodes and display a plot of anycast members.
"""
def help_minimize(self):
print """
Usage: minimize
Try to reduce as many of the metrics as possible,
making them as small as possible whilst preserving
every shortest path in the simulated model.
"""
def help_load(self):
print """
Usage: load <filename>
Load the topology and metrics from
the given file, replacing base model.
Useful if you work with several models,
you don't have to restart the program
to switch between them.
"""
def help_reload(self):
print """
Usage: reload
Reload the topology and metrics from
file, replacing base model.
"""
def help_plot(self):
print """
Usage: plot (with-load) (all-metrics)
Display metrics and topology graphically.
If given the 'with-load' option, instead
display current link utilizations.
If given the 'all-metrics' option, default metrics
will also be drawn.
"""
def help_asymmetric(self):
print """
Usage: asymmetric
List links with asymmetric metrics.
If a simulation is active this shows
data from the current simulated topology.
"""
def help_quit(self):
print """
Usage: quit
End program.
"""
def help_areaplot(self):
print """
Usage: areaplot
Display metrics and topology graphically.
Show the different IS-IS areas with different
colors
"""
def help_stop(self):
print """
Usage: stop
End current simulation.
"""
def help_list(self):
print """
Usage: list
List the name of all nodes.
"""
def help_linkinfo(self):
print """
Usage: linkinfo <source> <destination> (with-transit)
Show various information about the link
between the source and destination nodes.
If available, also shows the capacity and
current utilization i Mbit/s.
If given the with-transit option show (start,end) pairs
using this link as a transit link.
"""
def help_listequal(self):
print """
Usage: listequal
List all equal-cost (source, destinations) pairs.
If a simulation is active, the simulated model
is used in the computations.
If number of paths is greater than two, the
number of paths is printed after each destination.
"""
def help_png(self):
print """
Usage: png (filename)
Save current topology to a PNG file.
If a simulation is active, saves the
simulated topology.
"""
def help_simplot(self):
print """
Usage: simplot (with-load)
Display metrics and topology graphically
given the current simulated changes.
If any anycast nodes exists, plot with
anycast groups displayed (equivalent to
running 'anycast' without arguments)
If given the 'with-load' option, instead
display current simulated link utilizations.
If given the 'all-metrics' option, default metrics
will also be drawn.
"""
def help_stats(self):
print """
Usage: stats
Display some statistics of current
topology. If a simulation is active
this shows statistics for the current
simulated topology.
"""
def help_linkloads(self):
print """
Usage: linkloads
Fetch updated link utilizations for the
last hour (average load). At the moment this
is a UNINETT specific command.
"""
def help_info(self):
print """
Usage: info <node>
Display some information and stats
about the given node. If a simulation
is active this uses the data from
the simulation.
"""
def help_changes(self):
print """
Usage: changes (as-commands|no-effects) (<source>)
Display current simulated changes
and their effects. For more detail
provide an optional source node.
To show current changes as commands (i.e.
to input on another machine/simulation)
use the 'as-commands' option.
To list only the changes, and not the effects
use the 'no-effects' option. These options can
both be given at the same time.
"""
def help_sim(self):
self.help_simulation()
def help_simulation(self):
print """
Usage: simulation (start|stop)
Enter or leave simulation mode. In order to simulate
metric changes and failures you have to be in simulation
mode.
In simulation mode the prompt changes to reflect the
changes to the model. The numbers and letters mean:
* Nc - #changes simulated
* N:N:Nn - #nodes with changed paths to >= 1, 5
and 20 nodes.
* N:N:Nu - #links with >= 50, 75 and 95% utilization
respectively. (If linkloads are active)
* NL - change in average shortest path length
* Nr - change in radius, ie. minimum all-pairs
shortest path distance.
* Nd - change in diameter, ie. maximum all-pairs
shortest path distance.
* N:Np - #partitions and #nodes not in the largest
partition, when parts of the network are cut
off from each other.
If anycast simulation is active, the following numbers
are displayed:
* Na - #anycast nodes/groups
* N:N:Nm - #members belonging to each anycast group
For more detail of changes, the 'stats', 'changes', 'linkinfo'
and 'diffpath'-commands might be useful.
"""
def help_metric(self):
print """
Usage:
metric <src> <dst> <value> (one-way|two-way)
Set metric for link between source and
destination to value. If metric is symmetric
this will set metric between destination
and source as well.
If the optional (one-way|two-way) argument is
given the metric is applied accordingly.
"""
def help_utilizations(self):
print """
Usage: utilizations
Display any links with utilization >= 75%, as well
as the top 10 utilized links.
"""
def help_linkfail(self):
print """
Usage: linkfail <source> <destination>
Simulate link failure between source and
destination
"""
def help_routerfail(self):
print """
Usage: routerfail <node>
Simulate router failure by removing
the node.
"""
def help_undo(self):
print """
Usage: undo <change #>
Undo the change with given number.
Type 'changes' to get a list of the changes.
"""
#
# Quit-methods
#
def do_EOF(self, arg):
    # Ctrl-D: inside a simulation it only leaves simulation mode;
    # otherwise it quits the program.
    if self.simulation.is_active():
        print
        self.do_simulation("stop")
        print
        return
    self.do_quit(arg)
def do_quit(self, arg):
    """Persist readline history (best effort) and exit the program."""
    print "Bye!"
    try:
        readline.write_history_file(self.histfile)
    except IOError:
        # History saving is best-effort; ignore unwritable history files.
        pass
    sys.exit(0)
# Private methods
def _colormode(self, on):
self.bt = lambda x: colored(x, attrs=['bold'])
self.pbt = lambda x: colored2(x, attrs=['bold'])
if on:
self.termcolor = True
self.prt = lambda x: colored2(x, 'red')
self.blt = lambda x: colored(x, 'magenta')
self.pblt = lambda x: colored2(x, 'magenta')
self.pbblt = lambda x: colored3(x, 'magenta')
self.pgrt = lambda x: colored2(x, 'cyan')
self.pbgrt = lambda x: colored3(x, 'cyan')
else:
self.termcolor = False
self.blt = lambda x: x
self.prt = lambda x: x
self.pblt = lambda x: x
self.pbblt = self.pbt
self.pgrt = lambda x: x
self.pbgrt = self.pbt
|
zaina/nova | refs/heads/master | nova/scheduler/filters/metrics_filter.py | 42 | # Copyright (c) 2014 Intel, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from nova.scheduler import filters
from nova.scheduler import utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('weight_setting',
'nova.scheduler.weights.metrics',
group='metrics')
class MetricsFilter(filters.BaseHostFilter):
    """Filter out hosts missing the metrics used for weighing.

    Hosts that lack any metric named in the ``[metrics] weight_setting``
    option are rejected so the metrics weigher never encounters a host
    without its inputs.
    """

    def __init__(self):
        super(MetricsFilter, self).__init__()
        # "name=ratio" pairs; only the metric names matter for filtering.
        parsed = utils.parse_options(CONF.metrics.weight_setting,
                                     sep='=',
                                     converter=float,
                                     name="metrics.weight_setting")
        self.keys = [opt[0] for opt in parsed]

    def host_passes(self, host_state, filter_properties):
        missing = [key for key in self.keys if key not in host_state.metrics]
        if missing:
            LOG.debug("%(host_state)s does not have the following "
                      "metrics: %(metrics)s",
                      {'host_state': host_state,
                       'metrics': ', '.join(missing)})
        return not missing
|
remyroy/uwsgi | refs/heads/master | examples/bootstrap5.py | 34 | import uwsgi
# Example: dump the resource embedded at "data://0" in the uWSGI binary.
print uwsgi.extract("data://0")
|
GenericStudent/home-assistant | refs/heads/dev | homeassistant/components/ovo_energy/config_flow.py | 5 | """Config flow to configure the OVO Energy integration."""
import aiohttp
from ovoenergy.ovoenergy import OVOEnergy
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.config_entries import ConfigFlow
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from .const import CONF_ACCOUNT_ID, DOMAIN # pylint: disable=unused-import
USER_SCHEMA = vol.Schema(
{vol.Required(CONF_USERNAME): str, vol.Required(CONF_PASSWORD): str}
)
class OVOEnergyFlowHandler(ConfigFlow, domain=DOMAIN):
    """Handle a OVO Energy config flow."""

    VERSION = 1
    CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL

    async def async_step_user(self, user_input=None):
        """Handle a flow initiated by the user."""
        errors = {}
        if user_input is not None:
            client = OVOEnergy()
            try:
                authenticated = await client.authenticate(
                    user_input[CONF_USERNAME], user_input[CONF_PASSWORD]
                )
            except aiohttp.ClientError:
                # Network-level failure talking to the OVO API.
                errors["base"] = "cannot_connect"
            else:
                if authenticated:
                    # One config entry per OVO username: register the
                    # unique id first, then abort if already configured.
                    await self.async_set_unique_id(user_input[CONF_USERNAME])
                    self._abort_if_unique_id_configured()
                    return self.async_create_entry(
                        title=client.account_id,
                        data={
                            CONF_USERNAME: user_input[CONF_USERNAME],
                            CONF_PASSWORD: user_input[CONF_PASSWORD],
                            CONF_ACCOUNT_ID: client.account_id,
                        },
                    )
                # authenticate() returned falsy: credentials rejected.
                errors["base"] = "invalid_auth"
        # First visit or failed attempt: (re)display the login form.
        return self.async_show_form(
            step_id="user", data_schema=USER_SCHEMA, errors=errors
        )
|
HPPTECH/hpp_IOSTressTest | refs/heads/master | Refer/IOST_OLD_SRC/IOST_0.19/Libs/IOST_WMain/IOST_WMain_I2C.py | 2 | #!/usr/bin/python
#======================================================================
#
# Project : hpp_IOStressTest
# File : Libs/IOST_WMain/IOST_WMain_I2C.py
# Date : Oct 20, 2016
# Author : HuuHoang Nguyen
# Contact : hhnguyen@apm.com
# : hoangnh.hpp@gmail.com
# License : MIT License
# Copyright : 2016
# Description: The hpp_IOStressTest is under the MIT License, a copy of license which may be found in LICENSE
#
#======================================================================
import io
import os
import sys
import time
from Libs import IOST_Basic
from Libs import IOST_Config
from Libs import IOST_WSetupTestcase
import gtk
import gtk.glade
#======================================================================
IOST_WMain_I2C_Debug_Enable = 0
#======================================================================
class IOST_WMain_I2C():
"""
This is class to get all I2C object from IOST_WMain_Skylark window and control to these
component
"""
def __init__(self, glade_filename, window_name, builder=None):
"""
"""
self.IOST_WMain_I2C_window_name=window_name
if not builder:
self.IOST_I2C_Builder = gtk.Builder()
self.IOST_I2C_Builder.add_from_file(glade_filename)
self.IOST_I2C_Builder.connect_signals(self)
else:
self.IOST_I2C_Builder = builder
#----------------------------------------------------------------------
def GetI2C_Obj(self, window_name):
"""
Get all I2C objects on WMain window
"""
self.IOST_Objs[window_name][window_name+"_IP_Enable_I2C_CB"] = self.IOST_I2C_Builder.get_object(self.IOST_Objs[window_name]["_IP_Enable_I2C_CB"])
for i in range(0, self.IOST_Data["I2C_PortNum"]):
self.IOST_Objs[window_name][window_name+"_Config_I2C"+str(i)+"_CB"] = \
self.IOST_I2C_Builder.get_object(self.IOST_Objs[window_name]["_Config_I2C"+str(i)+"_CB"])
self.IOST_Objs[window_name][window_name+"_Config_I2C"+str(i)+"_B"] = \
self.IOST_I2C_Builder.get_object(self.IOST_Objs[window_name]["_Config_I2C"+str(i)+"_B"])
#----------------------------------------------------------------------
def SetValueToI2C_Obj(self, window_name):
"""
Init all I2C objects when start IOST Wmain program
"""
if self.IOST_Data["I2C"] == "Enable":
self.IOST_Objs[window_name][window_name+"_IP_Enable_I2C_CB"].set_active(True)
for i in range(0, self.IOST_Data["I2C_PortNum"]):
if self.IOST_Data["I2C"+str(i)][0] == "Disable":
self.IOST_Objs[window_name][window_name+"_Config_I2C"+str(i)+"_CB"].set_active(False)
self.IOST_Objs[window_name][window_name+"_Config_I2C"+str(i)+"_B"].set_sensitive(False)
else:
self.IOST_Objs[window_name][window_name+"_Config_I2C"+str(i)+"_CB"].set_active(True)
self.IOST_Objs[window_name][window_name+"_Config_I2C"+str(i)+"_B"].set_sensitive(True)
else:
self.IOST_Objs[window_name][window_name+"_IP_Enable_I2C_CB"].set_active(False)
for i in range(0, self.IOST_Data["I2C_PortNum"]):
self.IOST_Objs[window_name][window_name+"_Config_I2C"+str(i)+"_CB"].set_sensitive(False)
self.IOST_Objs[window_name][window_name+"_Config_I2C"+str(i)+"_B"].set_sensitive(False)
#Update test case
for i in range(0, self.IOST_Data["I2C_PortNum"]):
self.IOST_Data["I2C"+str(i)+"_TestCaseNum"] = len(self.IOST_Data["I2C"+str(i)]) - 1
#----------------------------------------------------------------------
def on_IOST_WMain_Config_I2C0_B_clicked(self, object, data=None):
"Control to ConfigI2C-0 button "
self.WSetupTestcase_SetupTestcase("IOST_WSetupTestcase", "_Skylark", "I2C0")
#----------------------------------------------------------------------
def on_IOST_WMain_Config_I2C0_C_toggled(self, object, data=None):
""
Res = self.IOST_Objs[self.IOST_WMain_I2C_window_name][self.IOST_WMain_I2C_window_name+"_Config_I2C0_CB"].get_active()
self.IOST_Objs[self.IOST_WMain_I2C_window_name][self.IOST_WMain_I2C_window_name+"_Config_I2C0_B"].set_sensitive(Res)
if (Res):
self.IOST_Data["I2C0"][0] = 'Enable'
else:
self.IOST_Data["I2C0"][0] = 'Disable'
if IOST_WMain_I2C_Debug_Enable:
print self.IOST_Data["I2C0"][0]
#----------------------------------------------------------------------
def on_IOST_WMain_Config_I2C1_B_clicked(self, object, data=None):
"Control to ConfigI2C-1 button "
self.WSetupTestcase_SetupTestcase("IOST_WSetupTestcase", "_Skylark", "I2C1")
#----------------------------------------------------------------------
def on_IOST_WMain_Config_I2C1_C_toggled(self, object, data=None):
""
Res = self.IOST_Objs[self.IOST_WMain_I2C_window_name][self.IOST_WMain_I2C_window_name+"_Config_I2C1_CB"].get_active()
self.IOST_Objs[self.IOST_WMain_I2C_window_name][self.IOST_WMain_I2C_window_name+"_Config_I2C1_B"].set_sensitive(Res)
if (Res):
self.IOST_Data["I2C1"][0] = 'Enable'
else:
self.IOST_Data["I2C1"][0] = 'Disable'
if IOST_WMain_I2C_Debug_Enable:
print self.IOST_Data["I2C1"][0]
#----------------------------------------------------------------------
def on_IOST_WMain_Config_I2C2_B_clicked(self, object, data=None):
"Control to ConfigI2C-2 button "
self.WSetupTestcase_SetupTestcase("IOST_WSetupTestcase", "_Skylark", "I2C2")
#----------------------------------------------------------------------
def on_IOST_WMain_Config_I2C2_C_toggled(self, object, data=None):
""
Res = self.IOST_Objs[self.IOST_WMain_I2C_window_name][self.IOST_WMain_I2C_window_name+"_Config_I2C2_CB"].get_active()
self.IOST_Objs[self.IOST_WMain_I2C_window_name][self.IOST_WMain_I2C_window_name+"_Config_I2C2_B"].set_sensitive(Res)
if (Res):
self.IOST_Data["I2C2"][0] = 'Enable'
else:
self.IOST_Data["I2C2"][0] = 'Disable'
if IOST_WMain_I2C_Debug_Enable:
print self.IOST_Data["I2C2"][0]
#----------------------------------------------------------------------
def on_IOST_WMain_Config_I2C3_B_clicked(self, object, data=None):
"Control to ConfigI2C-3 button "
self.WSetupTestcase_SetupTestcase("IOST_WSetupTestcase", "_Skylark", "I2C3")
#----------------------------------------------------------------------
def on_IOST_WMain_Config_I2C3_C_toggled(self, object, data=None):
""
Res = self.IOST_Objs[self.IOST_WMain_I2C_window_name][self.IOST_WMain_I2C_window_name+"_Config_I2C3_CB"].get_active()
self.IOST_Objs[self.IOST_WMain_I2C_window_name][self.IOST_WMain_I2C_window_name+"_Config_I2C3_B"].set_sensitive(Res)
if (Res):
self.IOST_Data["I2C3"][0] = 'Enable'
else:
self.IOST_Data["I2C3"][0] = 'Disable'
if IOST_WMain_I2C_Debug_Enable:
print self.IOST_Data["I2C3"][0]
#----------------------------------------------------------------------
def on_IOST_WMain_Config_I2C4_B_clicked(self, object, data=None):
"Control to ConfigI2C-4 button "
self.WSetupTestcase_SetupTestcase("IOST_WSetupTestcase", "_Skylark", "I2C4")
#----------------------------------------------------------------------
def on_IOST_WMain_Config_I2C4_C_toggled(self, object, data=None):
""
Res = self.IOST_Objs[self.IOST_WMain_I2C_window_name][self.IOST_WMain_I2C_window_name+"_Config_I2C4_CB"].get_active()
self.IOST_Objs[self.IOST_WMain_I2C_window_name][self.IOST_WMain_I2C_window_name+"_Config_I2C4_B"].set_sensitive(Res)
if (Res):
self.IOST_Data["I2C4"][0] = 'Enable'
else:
self.IOST_Data["I2C4"][0] = 'Disable'
if IOST_WMain_I2C_Debug_Enable:
print self.IOST_Data["I2C4"][0]
#----------------------------------------------------------------------
def on_IOST_WMain_Config_I2C5_B_clicked(self, object, data=None):
"Control to ConfigI2C-5 button "
self.WSetupTestcase_SetupTestcase("IOST_WSetupTestcase", "_Skylark", "I2C5")
#----------------------------------------------------------------------
def on_IOST_WMain_Config_I2C5_C_toggled(self, object, data=None):
""
Res = self.IOST_Objs[self.IOST_WMain_I2C_window_name][self.IOST_WMain_I2C_window_name+"_Config_I2C5_CB"].get_active()
self.IOST_Objs[self.IOST_WMain_I2C_window_name][self.IOST_WMain_I2C_window_name+"_Config_I2C5_B"].set_sensitive(Res)
if (Res):
self.IOST_Data["I2C5"][0] = 'Enable'
else:
self.IOST_Data["I2C5"][0] = 'Disable'
if IOST_WMain_I2C_Debug_Enable:
print self.IOST_Data["I2C5"][0]
#----------------------------------------------------------------------
def on_IOST_WMain_IP_Enable_I2C_CB_toggled(self, object, data=None):
    """Master I2C-enable checkbox: propagate its state to every per-port
    widget and record the overall Enable/Disable flag."""
    win = self.IOST_WMain_I2C_window_name
    master_on = self.IOST_Objs[win][win + "_IP_Enable_I2C_CB"].get_active()
    self.IOST_WMain_I2C_set_sensitive_all(master_on)
    self.IOST_Data["I2C"] = 'Enable' if master_on else 'Disable'
#----------------------------------------------------------------------
def IOST_WMain_I2C_set_sensitive_all(self, value):
    """Set the sensitivity of every per-port I2C checkbox to *value*.

    A port's Config button only follows *value* when that port is marked
    'Enable' in IOST_Data; otherwise the button stays insensitive.
    """
    win = self.IOST_WMain_I2C_window_name
    widgets = self.IOST_Objs[win]
    for port in range(self.IOST_Data["I2C_PortNum"]):
        widgets[win + "_Config_I2C" + str(port) + "_CB"].set_sensitive(value)
        port_enabled = self.IOST_Data["I2C" + str(port)][0] == "Enable"
        button_state = value if (port_enabled and value) else False
        widgets[win + "_Config_I2C" + str(port) + "_B"].set_sensitive(button_state)
#----------------------------------------------------------------------
#----------------------------------------------------------------------
#----------------------------------------------------------------------
|
jrrembert/django | refs/heads/master | tests/utils_tests/test_itercompat.py | 569 | from django.test import TestCase
from .models import Category, Thing
class TestIsIterator(TestCase):
    # Regression test for django.utils.itercompat iterator detection: the
    # create/filter calls below must simply not raise.
    def test_regression(self):
        """This failed on Django 1.5/Py2.6 because category has a next method."""
        # presumably the 'next' attribute came from a related manager /
        # descriptor on Category -- TODO confirm against the models module.
        category = Category.objects.create(name='category')
        Thing.objects.create(category=category)
        Thing.objects.filter(category=category)
|
Workday/OpenFrame | refs/heads/master | tools/telemetry/third_party/gsutilz/third_party/apitools/apitools/base/py/exceptions.py | 23 | #!/usr/bin/env python
"""Exceptions for generated client libraries."""
class Error(Exception):

    """Base class for all exceptions."""


class TypecheckError(Error, TypeError):

    """An object of an incorrect type is provided."""


class NotFoundError(Error):

    """A specified resource could not be found."""


class UserError(Error):

    """Base class for errors related to user input."""


class InvalidDataError(Error):

    """Base class for any invalid data error."""


class CommunicationError(Error):

    """Any communication error talking to an API server."""


class HttpError(CommunicationError):

    """Error making a request. Soon to be HttpError."""

    def __init__(self, response, content, url):
        super(HttpError, self).__init__()
        # Keep the raw response triple so callers can inspect it directly.
        self.response = response
        self.content = content
        self.url = url

    def __str__(self):
        # Body may be bytes; decode leniently so __str__ can never raise.
        decoded_body = self.content.decode('ascii', 'replace')
        return 'HttpError accessing <%s>: response: <%s>, content <%s>' % (
            self.url, self.response, decoded_body)

    @property
    def status_code(self):
        # TODO(craigcitro): Turn this into something better than a
        # KeyError if there is no status.
        return int(self.response['status'])

    @classmethod
    def FromResponse(cls, http_response):
        # Alternate constructor: build directly from an http_response object.
        return cls(http_response.info, http_response.content,
                   http_response.request_url)
# Concrete leaves of the exception taxonomy above; each exists so callers can
# catch a precise failure mode.  None adds behaviour beyond its base classes.
class InvalidUserInputError(InvalidDataError):
    """User-provided input is invalid."""


class InvalidDataFromServerError(InvalidDataError, CommunicationError):
    """Data received from the server is malformed (both a data error and a
    communication error, so either branch can catch it)."""


class BatchError(Error):
    """Error generated while constructing a batch request."""


class ConfigurationError(Error):
    """Base class for configuration errors."""


class GeneratedClientError(Error):
    """The generated client configuration is invalid."""


class ConfigurationValueError(UserError):
    """Some part of the user-specified client configuration is invalid."""


class ResourceUnavailableError(Error):
    """User requested an unavailable resource."""


class CredentialsError(Error):
    """Errors related to invalid credentials."""


class TransferError(CommunicationError):
    """Errors related to transfers."""


class TransferRetryError(TransferError):
    """Retryable errors related to transfers."""


class TransferInvalidError(TransferError):
    """The given transfer is invalid."""


class RequestError(CommunicationError):
    """The request was not successful."""
class RetryAfterError(HttpError):
    """The response contained a retry-after header."""

    def __init__(self, response, content, url, retry_after):
        super(RetryAfterError, self).__init__(response, content, url)
        # Normalize to an int number of seconds so callers can use it directly.
        self.retry_after = int(retry_after)

    @classmethod
    def FromResponse(cls, http_response):
        # Mirrors HttpError.FromResponse, additionally forwarding the
        # response object's retry_after value.
        return cls(http_response.info, http_response.content,
                   http_response.request_url, http_response.retry_after)


class BadStatusCodeError(HttpError):
    """The request completed but returned a bad status code."""


class NotYetImplementedError(GeneratedClientError):
    """This functionality is not yet implemented."""


class StreamExhausted(Error):
    """Attempted to read more bytes from a stream than were available."""
|
tietokilta-saato/tikplay | refs/heads/master | tikplay/provider/provider.py | 1 | #!/usr/bin/env python
# Part of tikplay
import importlib
import logging
import queue
from inspect import isclass
import provider.retrievers as retrievers
from .retriever import Retriever
from .task import Task
class Provider(object):
    """
    Provides a method for getting the audio data off arbitrary URLs via different retriever modules that can be
    dynamically loaded.
    """
    def __init__(self, conf, register_all=True):
        """Create a provider.

        conf: configuration object handed to every retriever instance.
        register_all: when True (default), automatically discover and
            register every retriever module under provider.retrievers.
        """
        # Loaded retriever instances, kept sorted by priority.
        self.retrievers = []
        # Tuples (task, exception) pushed by child threads to report failures.
        self.child_exception_queue = queue.Queue()
        self.conf = conf
        # Fix: use getLogger() instead of instantiating logging.Logger
        # directly -- direct instantiation bypasses the logging manager, so
        # the logger would not participate in the handler/level hierarchy.
        self.log = logging.getLogger("Provider")
        if register_all:
            self.register_all()

    def register_all(self):
        """Attempts to automatically register all retrievers in the relevant subdirectory."""
        for module in retrievers.__all__:
            module = importlib.import_module('provider.retrievers.' + module)
            for name, retriever in module.__dict__.items():
                # Discard non-classes
                if not isclass(retriever):
                    continue
                # Discard the abstract Retriever class that is imported in the module
                if retriever is Retriever:
                    continue
                # Discard non-Retrievers
                if not issubclass(retriever, Retriever):
                    continue
                self.log.info("Registering retriever %s.%s", module.__name__, name)
                self.register_retriever(retriever)

    def register_retriever(self, retriever_class):
        """Registers the given retriever, enabling the provider to download audio with the retriever.

        Raises TypeError if retriever_class is not a Retriever subclass.
        """
        if not issubclass(retriever_class, Retriever):
            raise TypeError("argument should be a subclass of Retriever")
        instance = retriever_class(self.conf)
        self.retrievers.append(instance)
        # Keep retrievers ordered by their numeric priority attribute
        # (ascending -- lower values are consulted first; TODO confirm).
        self.retrievers.sort(key=lambda i: i.priority)

    def canonicalize(self, url):
        """Canonicalizes the given URL, returning a suitable URI.

        Raises ValueError when no registered retriever handles the URL.
        """
        for retriever in self.retrievers:
            if retriever.handles_url(url):
                return retriever.canonicalize_url(url)
        raise ValueError("No provider found, cannot canonicalize " + url)

    def get(self, uri):
        """Retrieves audio from the given URI asynchronously. Returns a Task instance.

        The URI is "service:id"; the retriever whose uri_service matches is
        started in a background Task.  Raises ValueError for unknown services.
        """
        service, id_ = uri.split(":", 1)
        for retriever in self.retrievers:
            if retriever.__class__.uri_service == service:
                self.log.info("Using handler %s for %s", retriever.name, uri)
                task = Task(uri, retriever, self)
                task.start()
                return task
        # Fix: log through the instance logger rather than the root logger,
        # consistent with the rest of the class.
        self.log.warning("No provider found for URI " + uri)
        raise ValueError("No provider found for URI " + uri)

    def has_exception(self):
        """Returns whether or not there are unhandled exceptions in the child exception queue."""
        return not self.child_exception_queue.empty()

    def get_exceptions(self):
        """Returns all unhandled child exceptions as a list of dicts {task: Task, exception: Exception}."""
        ret = []
        while True:
            try:
                ret.append(dict(zip(("task", "exception"), self.child_exception_queue.get_nowait())))
            except queue.Empty:
                # Queue drained -- everything collected.
                return ret
40223139/203739test | refs/heads/master | static/Brython3.1.0-20150301-090019/Lib/token.py | 743 | """Token constants (from "token.h")."""
__all__ = ['tok_name', 'ISTERMINAL', 'ISNONTERMINAL', 'ISEOF']
# This file is automatically generated; please don't muck it up!
#
# To update the symbols in this file, 'cd' to the top directory of
# the python source tree after building the interpreter and run:
#
# ./python Lib/token.py
#--start constants--
ENDMARKER = 0
NAME = 1
NUMBER = 2
STRING = 3
NEWLINE = 4
INDENT = 5
DEDENT = 6
LPAR = 7
RPAR = 8
LSQB = 9
RSQB = 10
COLON = 11
COMMA = 12
SEMI = 13
PLUS = 14
MINUS = 15
STAR = 16
SLASH = 17
VBAR = 18
AMPER = 19
LESS = 20
GREATER = 21
EQUAL = 22
DOT = 23
PERCENT = 24
LBRACE = 25
RBRACE = 26
EQEQUAL = 27
NOTEQUAL = 28
LESSEQUAL = 29
GREATEREQUAL = 30
TILDE = 31
CIRCUMFLEX = 32
LEFTSHIFT = 33
RIGHTSHIFT = 34
DOUBLESTAR = 35
PLUSEQUAL = 36
MINEQUAL = 37
STAREQUAL = 38
SLASHEQUAL = 39
PERCENTEQUAL = 40
AMPEREQUAL = 41
VBAREQUAL = 42
CIRCUMFLEXEQUAL = 43
LEFTSHIFTEQUAL = 44
RIGHTSHIFTEQUAL = 45
DOUBLESTAREQUAL = 46
DOUBLESLASH = 47
DOUBLESLASHEQUAL = 48
AT = 49
RARROW = 50
ELLIPSIS = 51
OP = 52
ERRORTOKEN = 53
N_TOKENS = 54
NT_OFFSET = 256
#--end constants--
# Derived reverse mapping: numeric token value -> token name, built from the
# int-valued, non-underscore module globals defined above.
tok_name = {value: name
            for name, value in globals().items()
            if isinstance(value, int) and not name.startswith('_')}
__all__.extend(tok_name.values())
def ISTERMINAL(x):
    """Return True if *x* is a terminal token value (below NT_OFFSET)."""
    return x < NT_OFFSET


def ISNONTERMINAL(x):
    """Return True if *x* is a non-terminal symbol value (NT_OFFSET or above)."""
    return x >= NT_OFFSET


def ISEOF(x):
    """Return True if *x* is the end-of-input marker (ENDMARKER)."""
    return x == ENDMARKER
def _main():
    """Regenerate this module's token constants from a C token header.

    Reads #define lines from argv[1] (default Include/token.h) and rewrites
    the region between the '#--start constants--' and '#--end constants--'
    markers of argv[2] (default Lib/token.py) in place.  Exits with a
    non-zero status on any I/O or format failure.
    """
    import re
    import sys
    args = sys.argv[1:]
    inFileName = args and args[0] or "Include/token.h"
    outFileName = "Lib/token.py"
    if len(args) > 1:
        outFileName = args[1]
    # Use context managers so file handles are closed even on error paths
    # (the original leaked the handle when read()/split() raised).
    try:
        with open(inFileName) as fp:
            lines = fp.read().split("\n")
    except IOError as err:
        sys.stdout.write("I/O error: %s\n" % str(err))
        sys.exit(1)
    prog = re.compile(
        "#define[ \t][ \t]*([A-Z0-9][A-Z0-9_]*)[ \t][ \t]*([0-9][0-9]*)",
        re.IGNORECASE)
    tokens = {}
    for line in lines:
        match = prog.match(line)
        if match:
            name, val = match.group(1, 2)
            val = int(val)
            tokens[val] = name  # reverse so we can sort them...
    keys = sorted(tokens.keys())
    # load the output skeleton from the target:
    # ('skeleton' rather than 'format' -- avoid shadowing the builtin)
    try:
        with open(outFileName) as fp:
            skeleton = fp.read().split("\n")
    except IOError as err:
        sys.stderr.write("I/O error: %s\n" % str(err))
        sys.exit(2)
    try:
        start = skeleton.index("#--start constants--") + 1
        end = skeleton.index("#--end constants--")
    except ValueError:
        sys.stderr.write("target does not contain format markers")
        sys.exit(3)
    # Splice the regenerated NAME = value lines between the markers.
    skeleton[start:end] = ["%s = %d" % (tokens[val], val) for val in keys]
    try:
        with open(outFileName, 'w') as fp:
            fp.write("\n".join(skeleton))
    except IOError as err:
        sys.stderr.write("I/O error: %s\n" % str(err))
        sys.exit(4)
if __name__ == "__main__":
_main()
|
idncom/odoo | refs/heads/8.0 | addons/edi/models/res_currency.py | 437 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011-2012 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from edi import EDIMixin
from openerp import SUPERUSER_ID
RES_CURRENCY_EDI_STRUCT = {
#custom: 'code'
'symbol': True,
'rate': True,
}
class res_currency(osv.osv, EDIMixin):
    """EDI import/export support for ``res.currency`` records."""
    _inherit = "res.currency"

    def edi_export(self, cr, uid, records, edi_struct=None, context=None):
        """Export *records* as EDI documents per RES_CURRENCY_EDI_STRUCT,
        adding a custom 'code' field carrying the currency name."""
        edi_struct = dict(edi_struct or RES_CURRENCY_EDI_STRUCT)
        edi_doc_list = []
        for currency in records:
            # Get EDI doc based on struct. The result will also contain all metadata fields and attachments.
            edi_doc = super(res_currency,self).edi_export(cr, uid, [currency], edi_struct, context)[0]
            edi_doc.update(code=currency.name)
            edi_doc_list.append(edi_doc)
        return edi_doc_list

    def edi_import(self, cr, uid, edi_document, context=None):
        """Import one EDI currency document and return the id of the
        matching currency: by external id first, then by code (name),
        creating the record (and a rate line) only when neither matches."""
        self._edi_requires_attributes(('code','symbol'), edi_document)
        external_id = edi_document['__id']
        existing_currency = self._edi_get_object_by_external_id(cr, uid, external_id, 'res_currency', context=context)
        if existing_currency:
            return existing_currency.id

        # find with unique ISO code
        existing_ids = self.search(cr, uid, [('name','=',edi_document['code'])])
        if existing_ids:
            return existing_ids[0]

        # nothing found, create a new one
        currency_id = self.create(cr, SUPERUSER_ID, {'name': edi_document['code'],
                                                     'symbol': edi_document['symbol']}, context=context)
        # NOTE(review): 'rate' is only consumed on the creation path; a truthy
        # rate creates a companion res.currency.rate record as SUPERUSER.
        rate = edi_document.pop('rate')
        if rate:
            self.pool.get('res.currency.rate').create(cr, SUPERUSER_ID, {'currency_id': currency_id,
                                                                         'rate': rate}, context=context)
        return currency_id
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
kevinpark1217/PicoCTF-2017 | refs/heads/master | Much Ado About Hacking/calc.py | 1 | f = open('ending.txt', 'r')
text = f.read()
calc=text[0]
for i in range(1,len(text)-1,1):
c1 = ord(calc[i-1])
c = ord(text[i])
#c=((c1-32)+(o-32))%96+32
o = c-32+96+32+32-c1
if o > (96+32):
o = c-32+32+32-c1
calc += chr(o)
print calc[::-1]
|
Rodolfoarv/Mars-Explorer-AI-with-Python- | refs/heads/master | env/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/latin1prober.py | 1777 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe
from .compat import wrap_ord
FREQ_CAT_NUM = 4
UDF = 0 # undefined
OTH = 1 # other
ASC = 2 # ascii capital letter
ASS = 3 # ascii small letter
ACV = 4 # accent capital vowel
ACO = 5 # accent capital other
ASV = 6 # accent small vowel
ASO = 7 # accent small other
CLASS_NUM = 8 # total classes
Latin1_CharToClass = (
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 00 - 07
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 08 - 0F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 10 - 17
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 18 - 1F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 20 - 27
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 28 - 2F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 30 - 37
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 38 - 3F
OTH, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 40 - 47
ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 48 - 4F
ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 50 - 57
ASC, ASC, ASC, OTH, OTH, OTH, OTH, OTH, # 58 - 5F
OTH, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 60 - 67
ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 68 - 6F
ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 70 - 77
ASS, ASS, ASS, OTH, OTH, OTH, OTH, OTH, # 78 - 7F
OTH, UDF, OTH, ASO, OTH, OTH, OTH, OTH, # 80 - 87
OTH, OTH, ACO, OTH, ACO, UDF, ACO, UDF, # 88 - 8F
UDF, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 90 - 97
OTH, OTH, ASO, OTH, ASO, UDF, ASO, ACO, # 98 - 9F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A0 - A7
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A8 - AF
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B0 - B7
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B8 - BF
ACV, ACV, ACV, ACV, ACV, ACV, ACO, ACO, # C0 - C7
ACV, ACV, ACV, ACV, ACV, ACV, ACV, ACV, # C8 - CF
ACO, ACO, ACV, ACV, ACV, ACV, ACV, OTH, # D0 - D7
ACV, ACV, ACV, ACV, ACV, ACO, ACO, ACO, # D8 - DF
ASV, ASV, ASV, ASV, ASV, ASV, ASO, ASO, # E0 - E7
ASV, ASV, ASV, ASV, ASV, ASV, ASV, ASV, # E8 - EF
ASO, ASO, ASV, ASV, ASV, ASV, ASV, OTH, # F0 - F7
ASV, ASV, ASV, ASV, ASV, ASO, ASO, ASO, # F8 - FF
)
# 0 : illegal
# 1 : very unlikely
# 2 : normal
# 3 : very likely
Latin1ClassModel = (
# UDF OTH ASC ASS ACV ACO ASV ASO
0, 0, 0, 0, 0, 0, 0, 0, # UDF
0, 3, 3, 3, 3, 3, 3, 3, # OTH
0, 3, 3, 3, 3, 3, 3, 3, # ASC
0, 3, 3, 3, 1, 1, 3, 3, # ASS
0, 3, 3, 3, 1, 2, 1, 2, # ACV
0, 3, 3, 3, 3, 3, 3, 3, # ACO
0, 3, 1, 3, 1, 1, 1, 3, # ASV
0, 3, 1, 3, 1, 1, 3, 3, # ASO
)
class Latin1Prober(CharSetProber):
    """Charset prober for windows-1252 text.

    Classifies each byte via Latin1_CharToClass and scores every adjacent
    pair of classes with Latin1ClassModel; a score of 0 (illegal pair)
    rules the charset out immediately.
    """
    def __init__(self):
        CharSetProber.__init__(self)
        self.reset()

    def reset(self):
        # Class of the previously seen character (start state: OTH).
        self._mLastCharClass = OTH
        # Histogram of observed pair scores, indexed by category 0..3.
        self._mFreqCounter = [0] * FREQ_CAT_NUM
        CharSetProber.reset(self)

    def get_charset_name(self):
        return "windows-1252"

    def feed(self, aBuf):
        aBuf = self.filter_with_english_letters(aBuf)
        for c in aBuf:
            charClass = Latin1_CharToClass[wrap_ord(c)]
            # Score the (previous class, current class) transition.
            freq = Latin1ClassModel[(self._mLastCharClass * CLASS_NUM)
                                    + charClass]
            if freq == 0:
                # Illegal pair for Latin-1: definitively not this charset.
                self._mState = eNotMe
                break
            self._mFreqCounter[freq] += 1
            self._mLastCharClass = charClass

        return self.get_state()

    def get_confidence(self):
        if self.get_state() == eNotMe:
            return 0.01

        total = sum(self._mFreqCounter)
        if total < 0.01:
            confidence = 0.0
        else:
            # 'very likely' pairs (category 3) vote for; 'very unlikely'
            # pairs (category 1) vote against with a 20x penalty.
            confidence = ((self._mFreqCounter[3] - self._mFreqCounter[1] * 20.0)
                          / total)
            if confidence < 0.0:
                confidence = 0.0
            # lower the confidence of latin1 so that other more accurate
            # detector can take priority.
            confidence = confidence * 0.73
        return confidence
|
globau/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/pywebsocket/src/test/set_sys_path.py | 496 | # Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Configuration for testing.
Test files should import this module before mod_pywebsocket.
"""
import os
import sys
# Add the parent directory to sys.path to enable importing mod_pywebsocket.
sys.path.insert(0, os.path.join(os.path.split(__file__)[0], '..'))
# vi:sts=4 sw=4 et
|
GiantSteps/essentia | refs/heads/master | test/src/unittest/temporal/test_logattacktime_streaming.py | 10 | #!/usr/bin/env python
# Copyright (C) 2006-2013 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
from essentia.streaming import LogAttackTime as sLogAttackTime
class TestLogAttackTime_Streaming(TestCase):
    """Streaming-mode tests for the LogAttackTime algorithm."""

    def testEmpty(self):
        # An empty input stream must leave the pool empty -- no descriptor
        # should be written rather than some meaningless attack time.
        gen = VectorInput([])
        logAttack = sLogAttackTime()
        accu = RealAccumulator()
        p = Pool()

        # Network: VectorInput -> RealAccumulator -> LogAttackTime -> Pool.
        gen.data >> accu.data
        accu.array >> logAttack.signal
        logAttack.logAttackTime >> (p, 'logAttackTime')
        run(gen)

        self.assertEqual(p.descriptorNames(), [])

    def testRegression(self):
        # triangle input: a 22050-sample descending ramp followed by an
        # ascending one; the streaming result must match standard-mode
        # LogAttackTime on the identical signal.
        input = [float(i) for i in range(22050)]
        input.reverse()
        input += [float(i) for i in range(22050)]

        gen = VectorInput(input)
        logAttack = sLogAttackTime()
        accu = RealAccumulator()
        p = Pool()

        gen.data >> accu.data
        accu.array >> logAttack.signal
        logAttack.logAttackTime >> (p, 'logAttackTime')
        run(gen)

        self.assertAlmostEqual(p['logAttackTime'][0], LogAttackTime()(input))
suite = allTests(TestLogAttackTime_Streaming)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
|
harveybia/face-hack | refs/heads/master | venv/face/lib/python2.7/site-packages/setuptools/dist.py | 259 | __all__ = ['Distribution']
import re
import os
import sys
import warnings
import numbers
import distutils.log
import distutils.core
import distutils.cmd
import distutils.dist
from distutils.core import Distribution as _Distribution
from distutils.errors import (DistutilsOptionError, DistutilsPlatformError,
DistutilsSetupError)
from setuptools.depends import Require
from setuptools.compat import basestring, PY2
from setuptools import windows_support
import pkg_resources
packaging = pkg_resources.packaging
def _get_unpatched(cls):
"""Protect against re-patching the distutils if reloaded
Also ensures that no other distutils extension monkeypatched the distutils
first.
"""
while cls.__module__.startswith('setuptools'):
cls, = cls.__bases__
if not cls.__module__.startswith('distutils'):
raise AssertionError(
"distutils has already been patched by %r" % cls
)
return cls
_Distribution = _get_unpatched(_Distribution)
def _patch_distribution_metadata_write_pkg_info():
"""
Workaround issue #197 - Python 3 prior to 3.2.2 uses an environment-local
encoding to save the pkg_info. Monkey-patch its write_pkg_info method to
correct this undesirable behavior.
"""
environment_local = (3,) <= sys.version_info[:3] < (3, 2, 2)
if not environment_local:
return
# from Python 3.4
def write_pkg_info(self, base_dir):
"""Write the PKG-INFO file into the release tree.
"""
with open(os.path.join(base_dir, 'PKG-INFO'), 'w',
encoding='UTF-8') as pkg_info:
self.write_pkg_file(pkg_info)
distutils.dist.DistributionMetadata.write_pkg_info = write_pkg_info
_patch_distribution_metadata_write_pkg_info()
sequence = tuple, list
def check_importable(dist, attr, value):
    """Verify that *value* parses as an importable 'module:attrs' entry-point
    string with no extras; raise DistutilsSetupError otherwise."""
    try:
        ep = pkg_resources.EntryPoint.parse('x='+value)
        # Extras are not allowed in these simple importable strings.
        assert not ep.extras
    except (TypeError,ValueError,AttributeError,AssertionError):
        raise DistutilsSetupError(
            "%r must be importable 'module:attrs' string (got %r)"
            % (attr,value)
        )
def assert_string_list(dist, attr, value):
    """Verify that value is a string list or None"""
    try:
        # Joining succeeds only for an iterable of strings; the inequality
        # additionally rejects a plain string (joining one reproduces it).
        joined = ''.join(value)
        assert joined != value
    except (TypeError, ValueError, AttributeError, AssertionError):
        raise DistutilsSetupError(
            "%r must be a list of strings (got %r)" % (attr, value)
        )
def check_nsp(dist, attr, value):
    """Verify that namespace packages are valid"""
    assert_string_list(dist,attr,value)
    for nsp in value:
        if not dist.has_contents_for(nsp):
            raise DistutilsSetupError(
                "Distribution contains no modules or packages for " +
                "namespace package %r" % nsp
            )
        if '.' in nsp:
            # A dotted namespace package requires its parent to be declared
            # as a namespace package too; warn (do not fail) when missing.
            parent = '.'.join(nsp.split('.')[:-1])
            if parent not in value:
                distutils.log.warn(
                    "WARNING: %r is declared as a package namespace, but %r"
                    " is not: please correct this in setup.py", nsp, parent
                )
def check_extras(dist, attr, value):
    """Verify that extras_require mapping is valid"""
    try:
        for k,v in value.items():
            # 'name:marker' keys carry an environment marker after the
            # colon; validate the marker part separately.
            if ':' in k:
                k,m = k.split(':',1)
                if pkg_resources.invalid_marker(m):
                    raise DistutilsSetupError("Invalid environment marker: "+m)
            # Forcing the lazy parser via list() validates every specifier.
            list(pkg_resources.parse_requirements(v))
    except (TypeError,ValueError,AttributeError):
        raise DistutilsSetupError(
            "'extras_require' must be a dictionary whose values are "
            "strings or lists of strings containing valid project/version "
            "requirement specifiers."
        )
def assert_bool(dist, attr, value):
    """Verify that value is True, False, 0, or 1"""
    # bool(value) == value holds exactly for True/False/0/1.
    if bool(value) == value:
        return
    tmpl = "{attr!r} must be a boolean value (got {value!r})"
    raise DistutilsSetupError(tmpl.format(attr=attr, value=value))
def check_requirements(dist, attr, value):
    """Verify that install_requires is a valid requirements list"""
    try:
        # parse_requirements is lazy; list() forces full validation here.
        list(pkg_resources.parse_requirements(value))
    except (TypeError, ValueError) as error:
        tmpl = (
            "{attr!r} must be a string or list of strings "
            "containing valid project/version requirement specifiers; {error}"
        )
        raise DistutilsSetupError(tmpl.format(attr=attr, error=error))
def check_entry_points(dist, attr, value):
    """Verify that entry_points map is parseable"""
    try:
        pkg_resources.EntryPoint.parse_map(value)
    except ValueError as e:
        # Re-raise the parse failure as a setup error so setup() fails cleanly.
        raise DistutilsSetupError(e)
def check_test_suite(dist, attr, value):
    """Verify that test_suite is a string; raise DistutilsSetupError if not."""
    # basestring comes from setuptools.compat (imported at module top).
    if not isinstance(value,basestring):
        raise DistutilsSetupError("test_suite must be a string")
def check_package_data(dist, attr, value):
    """Verify that value is a dictionary of package names to glob lists"""
    if isinstance(value, dict):
        valid = True
        for pkg, globs in value.items():
            # Keys must be package-name strings.
            if not isinstance(pkg, str):
                valid = False
                break
            # Values only need to be iterable (lists of wildcard patterns).
            try:
                iter(globs)
            except TypeError:
                valid = False
                break
        if valid:
            return
    raise DistutilsSetupError(
        attr + " must be a dictionary mapping package names to lists of "
        "wildcard patterns"
    )
def check_packages(dist, attr, value):
    """Warn (without failing) about package names that are not dotted
    identifiers."""
    for pkgname in value:
        if re.match(r'\w+(\.\w+)*', pkgname):
            continue
        distutils.log.warn(
            "WARNING: %r not a valid package name; please use only"
            ".-separated package names in setup.py", pkgname
        )
class Distribution(_Distribution):
"""Distribution with support for features, tests, and package data
This is an enhanced version of 'distutils.dist.Distribution' that
effectively adds the following new optional keyword arguments to 'setup()':
'install_requires' -- a string or sequence of strings specifying project
versions that the distribution requires when installed, in the format
used by 'pkg_resources.require()'. They will be installed
automatically when the package is installed. If you wish to use
packages that are not available in PyPI, or want to give your users an
alternate download location, you can add a 'find_links' option to the
'[easy_install]' section of your project's 'setup.cfg' file, and then
setuptools will scan the listed web pages for links that satisfy the
requirements.
'extras_require' -- a dictionary mapping names of optional "extras" to the
additional requirement(s) that using those extras incurs. For example,
this::
extras_require = dict(reST = ["docutils>=0.3", "reSTedit"])
indicates that the distribution can optionally provide an extra
capability called "reST", but it can only be used if docutils and
reSTedit are installed. If the user installs your package using
EasyInstall and requests one of your extras, the corresponding
additional requirements will be installed if needed.
'features' **deprecated** -- a dictionary mapping option names to
'setuptools.Feature'
objects. Features are a portion of the distribution that can be
included or excluded based on user options, inter-feature dependencies,
and availability on the current system. Excluded features are omitted
from all setup commands, including source and binary distributions, so
you can create multiple distributions from the same source tree.
Feature names should be valid Python identifiers, except that they may
contain the '-' (minus) sign. Features can be included or excluded
via the command line options '--with-X' and '--without-X', where 'X' is
the name of the feature. Whether a feature is included by default, and
whether you are allowed to control this from the command line, is
determined by the Feature object. See the 'Feature' class for more
information.
'test_suite' -- the name of a test suite to run for the 'test' command.
If the user runs 'python setup.py test', the package will be installed,
and the named test suite will be run. The format is the same as
would be used on a 'unittest.py' command line. That is, it is the
dotted name of an object to import and call to generate a test suite.
'package_data' -- a dictionary mapping package names to lists of filenames
or globs to use to find data files contained in the named packages.
If the dictionary has filenames or globs listed under '""' (the empty
string), those names will be searched for in every package, in addition
to any names for the specific package. Data files found using these
names/globs will be installed along with the package, in the same
location as the package. Note that globs are allowed to reference
the contents of non-package subdirectories, as long as you use '/' as
a path separator. (Globs are automatically converted to
platform-specific paths at runtime.)
In addition to these new keywords, this class also has several new methods
for manipulating the distribution's contents. For example, the 'include()'
and 'exclude()' methods can be thought of as in-place add and subtract
commands that add or remove packages, modules, extensions, and so on from
the distribution. They are used by the feature subsystem to configure the
distribution for the included and excluded features.
"""
_patched_dist = None
def patch_missing_pkg_info(self, attrs):
    """Pre-register this project's version on an already-active distribution
    that has no PKG-INFO metadata (e.g. a fresh checkout)."""
    # Fake up a replacement for the data that would normally come from
    # PKG-INFO, but which might not yet be built if this is a fresh
    # checkout.
    #
    if not attrs or 'name' not in attrs or 'version' not in attrs:
        return
    key = pkg_resources.safe_name(str(attrs['name'])).lower()
    dist = pkg_resources.working_set.by_key.get(key)
    if dist is not None and not dist.has_metadata('PKG-INFO'):
        dist._version = pkg_resources.safe_version(str(attrs['version']))
        self._patched_dist = dist
def __init__(self, attrs=None):
    """Initialize, consuming setuptools-specific keywords from *attrs*.

    Handles 'features'/'require_features' (deprecated), 'src_root',
    'dependency_links' and 'setup_requires' before delegating the rest to
    distutils' Distribution, then normalizes the version string.
    """
    have_package_data = hasattr(self, "package_data")
    if not have_package_data:
        self.package_data = {}
    _attrs_dict = attrs or {}
    if 'features' in _attrs_dict or 'require_features' in _attrs_dict:
        Feature.warn_deprecated()
    self.require_features = []
    self.features = {}
    self.dist_files = []
    # pop() removes the setuptools-only keys so distutils never sees them.
    self.src_root = attrs and attrs.pop("src_root", None)
    self.patch_missing_pkg_info(attrs)
    # Make sure we have any eggs needed to interpret 'attrs'
    if attrs is not None:
        self.dependency_links = attrs.pop('dependency_links', [])
        assert_string_list(self,'dependency_links',self.dependency_links)
    if attrs and 'setup_requires' in attrs:
        self.fetch_build_eggs(attrs['setup_requires'])
    # Pre-seed every registered setup keyword so attribute access is safe
    # even when the keyword was not supplied.
    for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
        if not hasattr(self,ep.name):
            setattr(self,ep.name,None)
    _Distribution.__init__(self,attrs)
    if isinstance(self.metadata.version, numbers.Number):
        # Some people apparently take "version number" too literally :)
        self.metadata.version = str(self.metadata.version)

    if self.metadata.version is not None:
        try:
            ver = packaging.version.Version(self.metadata.version)
            normalized_version = str(ver)
            if self.metadata.version != normalized_version:
                warnings.warn(
                    "Normalizing '%s' to '%s'" % (
                        self.metadata.version,
                        normalized_version,
                    )
                )
                self.metadata.version = normalized_version
        except (packaging.version.InvalidVersion, TypeError):
            warnings.warn(
                "The version specified (%r) is an invalid version, this "
                "may not work as expected with newer versions of "
                "setuptools, pip, and PyPI. Please see PEP 440 for more "
                "details." % self.metadata.version
            )
def parse_command_line(self):
"""Process features after parsing command line options"""
result = _Distribution.parse_command_line(self)
if self.features:
self._finalize_features()
return result
def _feature_attrname(self,name):
"""Convert feature name to corresponding option attribute name"""
return 'with_'+name.replace('-','_')
def fetch_build_eggs(self, requires):
"""Resolve pre-setup requirements"""
resolved_dists = pkg_resources.working_set.resolve(
pkg_resources.parse_requirements(requires),
installer=self.fetch_build_egg,
replace_conflicting=True,
)
for dist in resolved_dists:
pkg_resources.working_set.add(dist, replace=True)
    def finalize_options(self):
        """Finalize distutils options, feature options, and setup keywords.

        After the base class runs, installs --with-X/--without-X global
        options for any declared features, then invokes the validator hook of
        every registered 'distutils.setup_keywords' entry point on the value
        the user supplied for that keyword.
        """
        _Distribution.finalize_options(self)
        if self.features:
            self._set_global_opts_from_features()
        for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
            value = getattr(self,ep.name,None)
            if value is not None:
                # The entry point's distribution may need installing before
                # its validator can be loaded.
                ep.require(installer=self.fetch_build_egg)
                ep.load()(self, ep.name, value)
        if getattr(self, 'convert_2to3_doctests', None):
            # XXX may convert to set here when we can rely on set being builtin
            self.convert_2to3_doctests = [os.path.abspath(p) for p in self.convert_2to3_doctests]
        else:
            self.convert_2to3_doctests = []
def get_egg_cache_dir(self):
egg_cache_dir = os.path.join(os.curdir, '.eggs')
if not os.path.exists(egg_cache_dir):
os.mkdir(egg_cache_dir)
windows_support.hide_file(egg_cache_dir)
readme_txt_filename = os.path.join(egg_cache_dir, 'README.txt')
with open(readme_txt_filename, 'w') as f:
f.write('This directory contains eggs that were downloaded '
'by setuptools to build, test, and run plug-ins.\n\n')
f.write('This directory caches those eggs to prevent '
'repeated downloads.\n\n')
f.write('However, it is safe to delete this directory.\n\n')
return egg_cache_dir
    def fetch_build_egg(self, req):
        """Fetch an egg needed for building.

        Lazily constructs (and caches on '_egg_fetcher') an 'easy_install'
        command configured from the project's own config files, restricted to
        download-related settings, then uses it to install *req* into the
        egg cache directory.
        """
        try:
            # Reuse the cached fetcher; reset its scan list each call.
            cmd = self._egg_fetcher
            cmd.package_index.to_scan = []
        except AttributeError:
            from setuptools.command.easy_install import easy_install
            dist = self.__class__({'script_args':['easy_install']})
            dist.parse_config_files()
            opts = dist.get_option_dict('easy_install')
            # Only download/location settings from config files are honored.
            keep = (
                'find_links', 'site_dirs', 'index_url', 'optimize',
                'site_dirs', 'allow_hosts'
            )
            for key in list(opts):
                if key not in keep:
                    del opts[key] # don't use any other settings
            if self.dependency_links:
                links = self.dependency_links[:]
                if 'find_links' in opts:
                    # Merge config-file find_links ahead of dependency_links.
                    links = opts['find_links'][1].split() + links
                opts['find_links'] = ('setup', links)
            install_dir = self.get_egg_cache_dir()
            cmd = easy_install(
                dist, args=["x"], install_dir=install_dir, exclude_scripts=True,
                always_copy=False, build_directory=None, editable=False,
                upgrade=False, multi_version=True, no_report=True, user=False
            )
            cmd.ensure_finalized()
            self._egg_fetcher = cmd
        return cmd.easy_install(req)
    def _set_global_opts_from_features(self):
        """Add --with-X/--without-X options based on optional features.

        Also initializes every feature's inclusion flag to None (unknown) and
        validates each feature against this distribution. Non-optional
        features get no command-line switches.
        """
        go = []
        no = self.negative_opt.copy()
        for name,feature in self.features.items():
            self._set_feature(name,None)
            feature.validate(self)
            if feature.optional:
                descr = feature.description
                incdef = ' (default)'
                excdef=''
                if not feature.include_by_default():
                    # Flip the "(default)" marker onto the --without option.
                    excdef, incdef = incdef, excdef
                go.append(('with-'+name, None, 'include '+descr+incdef))
                go.append(('without-'+name, None, 'exclude '+descr+excdef))
                # --without-X is recorded as the negation of --with-X.
                no['without-'+name] = 'with-'+name
        self.global_options = self.feature_options = go + self.global_options
        self.negative_opt = self.feature_negopt = no
    def _finalize_features(self):
        """Add/remove features and resolve dependencies between them."""
        # First, flag all the enabled items (and thus their dependencies)
        for name,feature in self.features.items():
            enabled = self.feature_is_included(name)
            # 'None' means the user expressed no preference; fall back to the
            # feature's own default.
            if enabled or (enabled is None and feature.include_by_default()):
                feature.include_in(self)
                self._set_feature(name,1)
        # Then disable the rest, so that off-by-default features don't
        # get flagged as errors when they're required by an enabled feature
        for name,feature in self.features.items():
            if not self.feature_is_included(name):
                feature.exclude_from(self)
                self._set_feature(name,0)
def get_command_class(self, command):
"""Pluggable version of get_command_class()"""
if command in self.cmdclass:
return self.cmdclass[command]
for ep in pkg_resources.iter_entry_points('distutils.commands',command):
ep.require(installer=self.fetch_build_egg)
self.cmdclass[command] = cmdclass = ep.load()
return cmdclass
else:
return _Distribution.get_command_class(self, command)
def print_commands(self):
for ep in pkg_resources.iter_entry_points('distutils.commands'):
if ep.name not in self.cmdclass:
# don't require extras as the commands won't be invoked
cmdclass = ep.resolve()
self.cmdclass[ep.name] = cmdclass
return _Distribution.print_commands(self)
def _set_feature(self,name,status):
"""Set feature's inclusion status"""
setattr(self,self._feature_attrname(name),status)
def feature_is_included(self,name):
"""Return 1 if feature is included, 0 if excluded, 'None' if unknown"""
return getattr(self,self._feature_attrname(name))
def include_feature(self,name):
"""Request inclusion of feature named 'name'"""
if self.feature_is_included(name)==0:
descr = self.features[name].description
raise DistutilsOptionError(
descr + " is required, but was excluded or is not available"
)
self.features[name].include_in(self)
self._set_feature(name,1)
def include(self,**attrs):
"""Add items to distribution that are named in keyword arguments
For example, 'dist.exclude(py_modules=["x"])' would add 'x' to
the distribution's 'py_modules' attribute, if it was not already
there.
Currently, this method only supports inclusion for attributes that are
lists or tuples. If you need to add support for adding to other
attributes in this or a subclass, you can add an '_include_X' method,
where 'X' is the name of the attribute. The method will be called with
the value passed to 'include()'. So, 'dist.include(foo={"bar":"baz"})'
will try to call 'dist._include_foo({"bar":"baz"})', which can then
handle whatever special inclusion logic is needed.
"""
for k,v in attrs.items():
include = getattr(self, '_include_'+k, None)
if include:
include(v)
else:
self._include_misc(k,v)
def exclude_package(self,package):
"""Remove packages, modules, and extensions in named package"""
pfx = package+'.'
if self.packages:
self.packages = [
p for p in self.packages
if p != package and not p.startswith(pfx)
]
if self.py_modules:
self.py_modules = [
p for p in self.py_modules
if p != package and not p.startswith(pfx)
]
if self.ext_modules:
self.ext_modules = [
p for p in self.ext_modules
if p.name != package and not p.name.startswith(pfx)
]
def has_contents_for(self,package):
"""Return true if 'exclude_package(package)' would do something"""
pfx = package+'.'
for p in self.iter_distribution_names():
if p==package or p.startswith(pfx):
return True
def _exclude_misc(self,name,value):
"""Handle 'exclude()' for list/tuple attrs without a special handler"""
if not isinstance(value,sequence):
raise DistutilsSetupError(
"%s: setting must be a list or tuple (%r)" % (name, value)
)
try:
old = getattr(self,name)
except AttributeError:
raise DistutilsSetupError(
"%s: No such distribution setting" % name
)
if old is not None and not isinstance(old,sequence):
raise DistutilsSetupError(
name+": this setting cannot be changed via include/exclude"
)
elif old:
setattr(self,name,[item for item in old if item not in value])
def _include_misc(self,name,value):
"""Handle 'include()' for list/tuple attrs without a special handler"""
if not isinstance(value,sequence):
raise DistutilsSetupError(
"%s: setting must be a list (%r)" % (name, value)
)
try:
old = getattr(self,name)
except AttributeError:
raise DistutilsSetupError(
"%s: No such distribution setting" % name
)
if old is None:
setattr(self,name,value)
elif not isinstance(old,sequence):
raise DistutilsSetupError(
name+": this setting cannot be changed via include/exclude"
)
else:
setattr(self,name,old+[item for item in value if item not in old])
def exclude(self,**attrs):
"""Remove items from distribution that are named in keyword arguments
For example, 'dist.exclude(py_modules=["x"])' would remove 'x' from
the distribution's 'py_modules' attribute. Excluding packages uses
the 'exclude_package()' method, so all of the package's contained
packages, modules, and extensions are also excluded.
Currently, this method only supports exclusion from attributes that are
lists or tuples. If you need to add support for excluding from other
attributes in this or a subclass, you can add an '_exclude_X' method,
where 'X' is the name of the attribute. The method will be called with
the value passed to 'exclude()'. So, 'dist.exclude(foo={"bar":"baz"})'
will try to call 'dist._exclude_foo({"bar":"baz"})', which can then
handle whatever special exclusion logic is needed.
"""
for k,v in attrs.items():
exclude = getattr(self, '_exclude_'+k, None)
if exclude:
exclude(v)
else:
self._exclude_misc(k,v)
def _exclude_packages(self,packages):
if not isinstance(packages,sequence):
raise DistutilsSetupError(
"packages: setting must be a list or tuple (%r)" % (packages,)
)
list(map(self.exclude_package, packages))
    def _parse_command_opts(self, parser, args):
        """Parse one command's options, expanding aliases first.

        Resets the feature-related global options so --with-X/--without-X
        are not offered per-command, expands any 'aliases' config entries
        (each at most once, to prevent infinite loops), and lets commands
        declaring 'command_consumes_arguments' swallow the remaining args.
        """
        # Remove --with-X/--without-X options when processing command args
        self.global_options = self.__class__.global_options
        self.negative_opt = self.__class__.negative_opt
        # First, expand any aliases
        command = args[0]
        aliases = self.get_option_dict('aliases')
        while command in aliases:
            src,alias = aliases[command]
            del aliases[command] # ensure each alias can expand only once!
            import shlex
            args[:1] = shlex.split(alias,True)
            command = args[0]
        nargs = _Distribution._parse_command_opts(self, parser, args)
        # Handle commands that want to consume all remaining arguments
        cmd_class = self.get_command_class(command)
        if getattr(cmd_class,'command_consumes_arguments',None):
            # Stash the leftover args as the command's 'args' option and
            # report nothing left for further command parsing.
            self.get_option_dict(command)['args'] = ("command line", nargs)
            if nargs is not None:
                return []
        return nargs
    def get_cmdline_options(self):
        """Return a '{cmd: {opt:val}}' map of all command-line options

        Option names are all long, but do not include the leading '--', and
        contain dashes rather than underscores.  If the option doesn't take
        an argument (e.g. '--quiet'), the 'val' is 'None'.

        Note that options provided by config files are intentionally excluded.
        """
        d = {}
        for cmd,opts in self.command_options.items():
            for opt,(src,val) in opts.items():
                if src != "command line":
                    continue
                opt = opt.replace('_','-')
                if val==0:
                    # A 0 value means a negative option (e.g. --no-X) was
                    # used; map it back to that negative option's name.
                    cmdobj = self.get_command_obj(cmd)
                    neg_opt = self.negative_opt.copy()
                    neg_opt.update(getattr(cmdobj,'negative_opt',{}))
                    for neg,pos in neg_opt.items():
                        if pos==opt:
                            opt=neg
                            val=None
                            break
                    else:
                        # Every 0-valued flag must have a negative alias.
                        raise AssertionError("Shouldn't be able to get here")
                elif val==1:
                    # A 1 value is a plain flag; it carries no argument.
                    val = None
                d.setdefault(cmd,{})[opt] = val
        return d
def iter_distribution_names(self):
"""Yield all packages, modules, and extension names in distribution"""
for pkg in self.packages or ():
yield pkg
for module in self.py_modules or ():
yield module
for ext in self.ext_modules or ():
if isinstance(ext,tuple):
name, buildinfo = ext
else:
name = ext.name
if name.endswith('module'):
name = name[:-6]
yield name
    def handle_display_options(self, option_order):
        """If there were any non-global "display-only" options
        (--help-commands or the metadata display options) on the command
        line, display the requested info and return true; else return
        false.

        On Python 3 this temporarily rewraps a UTF-8-incapable sys.stdout
        so metadata containing non-ASCII characters can always be printed.
        """
        import sys
        if PY2 or self.help_commands:
            return _Distribution.handle_display_options(self, option_order)
        # Stdout may be StringIO (e.g. in tests)
        import io
        if not isinstance(sys.stdout, io.TextIOWrapper):
            return _Distribution.handle_display_options(self, option_order)
        # Don't wrap stdout if utf-8 is already the encoding. Provides
        # workaround for #334.
        if sys.stdout.encoding.lower() in ('utf-8', 'utf8'):
            return _Distribution.handle_display_options(self, option_order)
        # Print metadata in UTF-8 no matter the platform
        encoding = sys.stdout.encoding
        errors = sys.stdout.errors
        newline = sys.platform != 'win32' and '\n' or None
        line_buffering = sys.stdout.line_buffering
        # detach() hands over the underlying binary buffer so it can be
        # rewrapped with a UTF-8 text layer for the duration of the call.
        sys.stdout = io.TextIOWrapper(
            sys.stdout.detach(), 'utf-8', errors, newline, line_buffering)
        try:
            return _Distribution.handle_display_options(self, option_order)
        finally:
            # Restore the original encoding/settings on the way out.
            sys.stdout = io.TextIOWrapper(
                sys.stdout.detach(), encoding, errors, newline, line_buffering)
# Install it throughout the distutils
# Monkeypatch every distutils module that exposes a Distribution class so
# that plain 'distutils.core.setup()' calls also get the setuptools version.
for module in distutils.dist, distutils.core, distutils.cmd:
    module.Distribution = Distribution
class Feature:
    """
    **deprecated** -- The `Feature` facility was never completely implemented
    or supported, `has reported issues
    <https://bitbucket.org/pypa/setuptools/issue/58>`_ and will be removed in
    a future version.

    A subset of the distribution that can be excluded if unneeded/wanted

    Features are created using these keyword arguments:

    'description' -- a short, human readable description of the feature, to
    be used in error messages, and option help messages.

    'standard' -- if true, the feature is included by default if it is
    available on the current system.  Otherwise, the feature is only
    included if requested via a command line '--with-X' option, or if
    another included feature requires it.  The default setting is 'False'.

    'available' -- if true, the feature is available for installation on the
    current system.  The default setting is 'True'.

    'optional' -- if true, the feature's inclusion can be controlled from the
    command line, using the '--with-X' or '--without-X' options.  If
    false, the feature's inclusion status is determined automatically,
    based on 'available', 'standard', and whether any other feature
    requires it.  The default setting is 'True'.

    'require_features' -- a string or sequence of strings naming features
    that should also be included if this feature is included.  Defaults to
    empty list.  May also contain 'Require' objects that should be
    added/removed from the distribution.

    'remove' -- a string or list of strings naming packages to be removed
    from the distribution if this feature is *not* included.  If the
    feature *is* included, this argument is ignored.  This argument exists
    to support removing features that "crosscut" a distribution, such as
    defining a 'tests' feature that removes all the 'tests' subpackages
    provided by other features.  The default for this argument is an empty
    list.  (Note: the named package(s) or modules must exist in the base
    distribution when the 'setup()' function is initially called.)

    other keywords -- any other keyword arguments are saved, and passed to
    the distribution's 'include()' and 'exclude()' methods when the
    feature is included or excluded, respectively.  So, for example, you
    could pass 'packages=["a","b"]' to cause packages 'a' and 'b' to be
    added or removed from the distribution as appropriate.

    A feature must include at least one 'requires', 'remove', or other
    keyword argument.  Otherwise, it can't affect the distribution in any way.
    Note also that you can subclass 'Feature' to create your own specialized
    feature types that modify the distribution in other ways when included or
    excluded.  See the docstrings for the various methods here for more detail.
    Aside from the methods, the only feature attributes that distributions look
    at are 'description' and 'optional'.
    """

    @staticmethod
    def warn_deprecated():
        warnings.warn(
            "Features are deprecated and will be removed in a future "
            "version. See http://bitbucket.org/pypa/setuptools/65.",
            DeprecationWarning,
            stacklevel=3,
        )

    def __init__(self, description, standard=False, available=True,
            optional=True, require_features=(), remove=(), **extras):
        self.warn_deprecated()
        self.description = description
        self.standard = standard
        self.available = available
        self.optional = optional
        # A bare string or Require object means a single requirement.
        if isinstance(require_features, (str, Require)):
            require_features = require_features,
        # Feature names (strings) are stored directly; Require objects are
        # routed through 'extras' so include()/exclude() can handle them.
        self.require_features = [
            r for r in require_features if isinstance(r, str)
        ]
        er = [r for r in require_features if not isinstance(r, str)]
        if er:
            extras['require_features'] = er
        if isinstance(remove, str):
            remove = remove,
        self.remove = remove
        self.extras = extras
        if not remove and not require_features and not extras:
            # Fix: the original message left the '%s' placeholder
            # uninterpolated, so the feature name never appeared.
            raise DistutilsSetupError(
                "Feature %s: must define 'require_features', 'remove', or "
                "at least one of 'packages', 'py_modules', etc."
                % self.description
            )

    def include_by_default(self):
        """Should this feature be included by default?"""
        return self.available and self.standard

    def include_in(self, dist):
        """Ensure feature and its requirements are included in distribution

        You may override this in a subclass to perform additional operations on
        the distribution.  Note that this method may be called more than once
        per feature, and so should be idempotent.
        """
        if not self.available:
            # Fix: the original concatenation omitted the space after the
            # comma ("is required,but is not available...").
            raise DistutilsPlatformError(
                self.description + " is required, "
                "but is not available on this platform"
            )
        dist.include(**self.extras)
        for f in self.require_features:
            dist.include_feature(f)

    def exclude_from(self, dist):
        """Ensure feature is excluded from distribution

        You may override this in a subclass to perform additional operations on
        the distribution.  This method will be called at most once per
        feature, and only after all included features have been asked to
        include themselves.
        """
        dist.exclude(**self.extras)
        if self.remove:
            for item in self.remove:
                dist.exclude_package(item)

    def validate(self, dist):
        """Verify that feature makes sense in context of distribution

        This method is called by the distribution just before it parses its
        command line.  It checks to ensure that the 'remove' attribute, if any,
        contains only valid package/module names that are present in the base
        distribution when 'setup()' is called.  You may override it in a
        subclass to perform any other required validation of the feature
        against a target distribution.
        """
        for item in self.remove:
            if not dist.has_contents_for(item):
                raise DistutilsSetupError(
                    "%s wants to be able to remove %s, but the distribution"
                    " doesn't contain any packages or modules under %s"
                    % (self.description, item, item)
                )
|
hryamzik/ansible | refs/heads/devel | lib/ansible/module_utils/network/aireos/aireos.py | 85 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2016 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback, return_values
from ansible.module_utils.network.common.utils import to_list, ComplexList
from ansible.module_utils.connection import exec_command
# Module-level cache of device running-configs, keyed by the exact
# 'show run-config commands ...' string used to fetch them (see get_config).
_DEVICE_CONFIGS = {}
# Connection options accepted under the nested 'provider' dict argument;
# credentials fall back to ANSIBLE_NET_* environment variables.
aireos_provider_spec = {
    'host': dict(),
    'port': dict(type='int'),
    'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
    'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
    'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
    'timeout': dict(type='int'),
}
# Argument spec fragment that aireos modules merge into their own specs.
aireos_argument_spec = {
    'provider': dict(type='dict', options=aireos_provider_spec)
}
# Same options at the top level; deprecated in favor of 'provider'
# and scheduled for removal in Ansible 2.9.
aireos_top_spec = {
    'host': dict(removed_in_version=2.9),
    'port': dict(removed_in_version=2.9, type='int'),
    'username': dict(removed_in_version=2.9),
    'password': dict(removed_in_version=2.9, no_log=True),
    'ssh_keyfile': dict(removed_in_version=2.9, type='path'),
    'timeout': dict(removed_in_version=2.9, type='int'),
}
aireos_argument_spec.update(aireos_top_spec)
def sanitize(resp):
    """Strip per-line and surrounding whitespace from device output.

    Aireos prepends whitespace that netcfg would misread as parent/child
    structure, and appends unused trailing whitespace; both are removed.
    """
    stripped = (line.strip() for line in resp.splitlines())
    return '\n'.join(stripped).strip()
def get_provider_argspec():
    """Return the shared 'provider' option spec (the module-level dict,
    not a copy, so callers all see the same object)."""
    return aireos_provider_spec
def check_args(module, warnings):
    """Validate platform-specific arguments; aireos needs no extra checks,
    so this is a deliberate no-op kept for API symmetry with other
    network platforms."""
    return None
def get_config(module, flags=None):
    """Return the device running-config, cached per flag combination.

    Builds a 'show run-config commands [flags...]' command, returns the
    cached output for that exact command if present, otherwise runs it on
    the device, sanitizes the result, caches it, and fails the module on a
    non-zero return code.
    """
    flags = [] if flags is None else flags
    cmd = 'show run-config commands '
    cmd += ' '.join(flags)
    cmd = cmd.strip()
    try:
        # Cache hit: same command string was fetched earlier this run.
        return _DEVICE_CONFIGS[cmd]
    except KeyError:
        rc, out, err = exec_command(module, cmd)
        if rc != 0:
            module.fail_json(msg='unable to retrieve current config', stderr=to_text(err, errors='surrogate_then_replace'))
        cfg = sanitize(to_text(out, errors='surrogate_then_replace').strip())
        _DEVICE_CONFIGS[cmd] = cfg
        return cfg
def to_commands(module, commands):
    """Normalize raw command entries into command/prompt/answer dicts.

    'command' is the key field; 'prompt' and 'answer' are optional and
    support commands that require interactive confirmation.
    """
    command_spec = {
        'command': dict(key=True),
        'prompt': dict(),
        'answer': dict(),
    }
    normalizer = ComplexList(command_spec, module)
    return normalizer(commands)
def run_commands(module, commands, check_rc=True):
    """Run each command on the device and return the sanitized outputs.

    With check_rc=True (the default), a non-zero return code fails the
    module with the device's stderr as the message.
    """
    output = []
    for command in to_commands(module, to_list(commands)):
        rc, out, err = exec_command(module, module.jsonify(command))
        if check_rc and rc != 0:
            module.fail_json(msg=to_text(err, errors='surrogate_then_replace'), rc=rc)
        output.append(sanitize(to_text(out, errors='surrogate_then_replace')))
    return output
def load_config(module, commands):
    """Enter config mode, apply *commands*, then leave with a final 'end'.

    Fails the module on the first command that returns a non-zero rc.
    """
    rc, out, err = exec_command(module, 'config')
    if rc != 0:
        # NOTE(review): stdout (not stderr) is reported here -- presumably
        # the CLI emits its error text on stdout; confirm before changing.
        module.fail_json(msg='unable to enter configuration mode',
                         err=to_text(out, errors='surrogate_then_replace'))
    for line in to_list(commands):
        # An embedded 'end' would drop out of config mode mid-stream; skip
        # it here and issue a single 'end' once all lines are applied.
        if line == 'end':
            continue
        rc, out, err = exec_command(module, line)
        if rc != 0:
            module.fail_json(msg=to_text(err, errors='surrogate_then_replace'),
                             command=line, rc=rc)
    exec_command(module, 'end')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.