repo_name | ref | path | copies | content |
|---|---|---|---|---|
bird50/birdproj | refs/heads/master | birdproj/profiles/models.py | 3 | import os
import uuid
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
def avatar_upload(instance, filename):
ext = filename.split(".")[-1]
filename = "%s.%s" % (uuid.uuid4(), ext)
return os.path.join("avatars", filename)
class Profile(models.Model):
user = models.ForeignKey(User)
name = models.CharField(max_length=75, blank=True)
avatar = models.ImageField(upload_to=avatar_upload, blank=True)
bio = models.TextField(blank=True)
affiliation = models.CharField(max_length=100, blank=True)
location = models.CharField(max_length=100, blank=True)
website = models.CharField(max_length=250, blank=True)
twitter_username = models.CharField("Twitter Username", max_length=100, blank=True)
created_at = models.DateTimeField(default=timezone.now)
modified_at = models.DateTimeField(default=timezone.now)
def save(self, *args, **kwargs):
self.modified_at = timezone.now()
return super(Profile, self).save(*args, **kwargs)
@property
def display_name(self):
if self.name:
return self.name
else:
return self.user.username
|
vineet-rh/incubator-airflow | refs/heads/master | airflow/operators/presto_to_mysql.py | 46 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from airflow.hooks.presto_hook import PrestoHook
from airflow.hooks.mysql_hook import MySqlHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class PrestoToMySqlTransfer(BaseOperator):
"""
Moves data from Presto to MySQL. Note that for now the data is loaded
into memory before being pushed to MySQL, so this operator should
be used for smallish amounts of data.
:param sql: SQL query to execute against the Presto database
:type sql: str
:param mysql_table: target MySQL table, use dot notation to target a
specific database
:type mysql_table: str
:param mysql_conn_id: destination mysql connection
:type mysql_conn_id: str
:param presto_conn_id: source presto connection
:type presto_conn_id: str
:param mysql_preoperator: sql statement to run against mysql prior to
import, typically used to truncate or delete in place of the data
coming in, allowing the task to be idempotent (running the task
twice won't double-load data)
:type mysql_preoperator: str
"""
template_fields = ('sql', 'mysql_table', 'mysql_preoperator')
template_ext = ('.sql',)
ui_color = '#a0e08c'
@apply_defaults
def __init__(
self,
sql,
mysql_table,
presto_conn_id='presto_default',
mysql_conn_id='mysql_default',
mysql_preoperator=None,
*args, **kwargs):
super(PrestoToMySqlTransfer, self).__init__(*args, **kwargs)
self.sql = sql
self.mysql_table = mysql_table
self.mysql_conn_id = mysql_conn_id
self.mysql_preoperator = mysql_preoperator
self.presto_conn_id = presto_conn_id
def execute(self, context):
presto = PrestoHook(presto_conn_id=self.presto_conn_id)
logging.info("Extracting data from Presto")
logging.info(self.sql)
results = presto.get_records(self.sql)
mysql = MySqlHook(mysql_conn_id=self.mysql_conn_id)
if self.mysql_preoperator:
logging.info("Running MySQL preoperator")
logging.info(self.mysql_preoperator)
mysql.run(self.mysql_preoperator)
logging.info("Inserting rows into MySQL")
mysql.insert_rows(table=self.mysql_table, rows=results)
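# A minimal usage sketch; the DAG id, date, query and table names are
# illustrative, and the connection ids rely on the defaults above:
#
#     from datetime import datetime
#     from airflow.models import DAG
#
#     dag = DAG('presto_to_mysql_example', start_date=datetime(2015, 1, 1))
#     PrestoToMySqlTransfer(
#         task_id='load_event_counts',
#         sql="SELECT name, count(*) cnt FROM events GROUP BY name",
#         mysql_table='stats.event_counts',
#         mysql_preoperator='TRUNCATE TABLE stats.event_counts',
#         dag=dag)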
|
wethegang/wethegang.github.io | refs/heads/master | node_modules/npm-shrinkwrap/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/common_test.py | 2542 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the common.py file."""
import gyp.common
import unittest
import sys
class TestTopologicallySorted(unittest.TestCase):
def test_Valid(self):
"""Test that sorting works on a valid graph with one possible order."""
graph = {
'a': ['b', 'c'],
'b': [],
'c': ['d'],
'd': ['b'],
}
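# With these edges, 'a' must precede 'b' and 'c', 'c' must precede 'd',
# and 'd' must precede 'b', so exactly one ordering is valid:
# ['a', 'c', 'd', 'b'], which the assertion below expects.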
def GetEdge(node):
return tuple(graph[node])
self.assertEqual(
gyp.common.TopologicallySorted(graph.keys(), GetEdge),
['a', 'c', 'd', 'b'])
def test_Cycle(self):
"""Test that an exception is thrown on a cyclic graph."""
graph = {
'a': ['b'],
'b': ['c'],
'c': ['d'],
'd': ['a'],
}
def GetEdge(node):
return tuple(graph[node])
self.assertRaises(
gyp.common.CycleError, gyp.common.TopologicallySorted,
graph.keys(), GetEdge)
class TestGetFlavor(unittest.TestCase):
"""Test that gyp.common.GetFlavor works as intended"""
original_platform = ''
def setUp(self):
self.original_platform = sys.platform
def tearDown(self):
sys.platform = self.original_platform
def assertFlavor(self, expected, argument, param):
sys.platform = argument
self.assertEqual(expected, gyp.common.GetFlavor(param))
def test_platform_default(self):
self.assertFlavor('freebsd', 'freebsd9' , {})
self.assertFlavor('freebsd', 'freebsd10', {})
self.assertFlavor('openbsd', 'openbsd5' , {})
self.assertFlavor('solaris', 'sunos5' , {})
self.assertFlavor('solaris', 'sunos' , {})
self.assertFlavor('linux' , 'linux2' , {})
self.assertFlavor('linux' , 'linux3' , {})
def test_param(self):
self.assertFlavor('foobar', 'linux2' , {'flavor': 'foobar'})
if __name__ == '__main__':
unittest.main()
|
Bashar/django | refs/heads/master | tests/get_or_create/models.py | 90 | from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Person(models.Model):
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
birthday = models.DateField()
def __str__(self):
return '%s %s' % (self.first_name, self.last_name)
class DefaultPerson(models.Model):
first_name = models.CharField(max_length=100, default="Anonymous")
class ManualPrimaryKeyTest(models.Model):
id = models.IntegerField(primary_key=True)
data = models.CharField(max_length=100)
class Profile(models.Model):
person = models.ForeignKey(Person, primary_key=True)
class Tag(models.Model):
text = models.CharField(max_length=255, unique=True)
class Thing(models.Model):
name = models.CharField(max_length=256)
tags = models.ManyToManyField(Tag)
class Publisher(models.Model):
name = models.CharField(max_length=100)
class Author(models.Model):
name = models.CharField(max_length=100)
class Book(models.Model):
name = models.CharField(max_length=100)
authors = models.ManyToManyField(Author, related_name='books')
publisher = models.ForeignKey(Publisher, related_name='books', db_column="publisher_id_column")
|
amarandon/opencore | refs/heads/master | opencore/models/tests/test_forum.py | 4 | # Copyright (C) 2008-2009 Open Society Institute
# Thomas Moroz: tmoroz.org
# 2010-2011 Large Blue
# Fergus Doyle: fergus.doyle@largeblue.com
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License Version 2 as published
# by the Free Software Foundation. You may not use, modify or distribute
# this program under any other version of the GNU General Public License.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import unittest
from repoze.bfg import testing
class ForumsFolderTests(unittest.TestCase):
def _getTargetClass(self):
from opencore.models.forum import ForumsFolder
return ForumsFolder
def _makeOne(self):
return self._getTargetClass()()
def test_class_conforms_to_IForum(self):
from zope.interface.verify import verifyClass
from opencore.models.interfaces import IForumsFolder
verifyClass(IForumsFolder, self._getTargetClass())
def test_instance_conforms_to_IForum(self):
from zope.interface.verify import verifyObject
from opencore.models.interfaces import IForumsFolder
verifyObject(IForumsFolder, self._makeOne())
class ForumTests(unittest.TestCase):
def _getTargetClass(self):
from opencore.models.forum import Forum
return Forum
def _makeOne(self, title='title', description='description', creator=None):
return self._getTargetClass()(title, description, creator)
def test_class_conforms_to_IForum(self):
from zope.interface.verify import verifyClass
from opencore.models.interfaces import IForum
verifyClass(IForum, self._getTargetClass())
def test_instance_conforms_to_IForum(self):
from zope.interface.verify import verifyObject
from opencore.models.interfaces import IForum
verifyObject(IForum, self._makeOne())
class ForumTopicTests(unittest.TestCase):
def _getTargetClass(self):
from opencore.models.forum import ForumTopic
return ForumTopic
def _makeOne(self, title=u'title', text=u'text', creator=u'admin' ):
return self._getTargetClass()(title, text, creator)
def test_class_conforms_to_IForumTopic(self):
from zope.interface.verify import verifyClass
from opencore.models.interfaces import IForumTopic
verifyClass(IForumTopic, self._getTargetClass())
def test_instance_conforms_to_IForumTopic(self):
from zope.interface.verify import verifyObject
from opencore.models.interfaces import IForumTopic
verifyObject(IForumTopic, self._makeOne())
def test_instance_has_valid_construction(self):
instance = self._makeOne()
self.assertEqual(instance.title, u'title')
self.assertEqual(instance.text, u'text')
self.assertEqual(instance.creator, u'admin')
self.assertEqual(instance.modified_by, u'admin')
self.failUnless('comments' in instance)
from zope.interface.verify import verifyObject
from opencore.models.interfaces import ICommentsFolder
verifyObject(ICommentsFolder, instance['comments'])
def test_instance_construct_with_none(self):
instance = self._makeOne(text=None)
self.assertEqual(instance.text, u'')
class TestForumsToolFactory(unittest.TestCase):
def setUp(self):
testing.cleanUp()
def tearDown(self):
testing.cleanUp()
def _makeOne(self):
from opencore.models.forum import forums_tool_factory
return forums_tool_factory
def test_it(self):
from repoze.lemonade.interfaces import IContentFactory
testing.registerAdapter(lambda *arg, **kw: DummyContent, (None,),
IContentFactory)
context = testing.DummyModel()
request = testing.DummyRequest()
factory = self._makeOne()
factory.add(context, request)
self.failUnless(context['forums'])
self.failUnless(factory.is_present(context, request))
factory.remove(context, request)
self.failIf(factory.is_present(context, request))
class DummyContent:
pass
|
arista-eosplus/ansible | refs/heads/devel | lib/ansible/modules/notification/grove.py | 34 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: grove
version_added: 1.4
short_description: Sends a notification to a grove.io channel
description:
- The C(grove) module sends a message for a service to a Grove.io
channel.
options:
channel_token:
description:
- Token of the channel to post to.
required: true
service:
description:
- Name of the service (displayed as the "user" in the message)
required: false
default: ansible
message:
description:
- Message content
required: true
url:
description:
- Service URL for the web client
required: false
icon_url:
description:
- Icon for the service
required: false
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: 1.5.1
author: "Jonas Pfenniger (@zimbatm)"
'''
EXAMPLES = '''
- grove: >
channel_token=6Ph62VBBJOccmtTPZbubiPzdrhipZXtg
service=my-app
message=deployed {{ target }}
'''
from ansible.module_utils.six.moves.urllib.parse import urlencode
BASE_URL = 'https://grove.io/api/notice/%s/'
# ==============================================================
# do_notify_grove
def do_notify_grove(module, channel_token, service, message, url=None, icon_url=None):
my_url = BASE_URL % (channel_token,)
my_data = dict(service=service, message=message)
if url is not None:
my_data['url'] = url
if icon_url is not None:
my_data['icon_url'] = icon_url
data = urlencode(my_data)
response, info = fetch_url(module, my_url, data=data)
if info['status'] != 200:
module.fail_json(msg="failed to send notification: %s" % info['msg'])
# ==============================================================
# main
def main():
module = AnsibleModule(
argument_spec = dict(
channel_token = dict(type='str', required=True, no_log=True),
message = dict(type='str', required=True),
service = dict(type='str', default='ansible'),
url = dict(type='str', default=None),
icon_url = dict(type='str', default=None),
validate_certs = dict(default='yes', type='bool'),
)
)
channel_token = module.params['channel_token']
service = module.params['service']
message = module.params['message']
url = module.params['url']
icon_url = module.params['icon_url']
do_notify_grove(module, channel_token, service, message, url, icon_url)
# Mission complete
module.exit_json(msg="OK")
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main()
|
rschnapka/odoo | refs/heads/7.0 | addons/base_gengo/wizard/__init__.py | 434 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base_gengo_translations
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
mesocentrefc/Janua-SMS | refs/heads/master | janua/activity/process_command.py | 1 | # -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
#
# Copyright (c) 2016 Cédric Clerget - HPC Center of Franche-Comté University
#
# This file is part of Janua-SMS
#
# http://github.com/mesocentrefc/Janua-SMS
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import json
import time
import Queue
import janua.utils.logger as jlogger
from janua.activity.activity import Activity
from janua import jdb
from janua.commands.db import CommandManager
from janua.commands import CommandError
from janua.commands.admin import AdminCommands
log = jlogger.getLogger(__name__)
class ProcessCommandActivity(Activity):
"""
A background process activity for database commands
"""
def __init__(self, *args, **kwargs):
super(ProcessCommandActivity, self).__init__(*args, **kwargs)
self.cmd_manager = CommandManager(jdb)
try:
self.cmd_manager.register()
except CommandError, err:
log.error(err)
def run(self):
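# Poll the command queue until the stop event is set: the 1-second
# get() timeout keeps the loop responsive to shutdown, each received
# (command, params) pair is recorded via insert(), and execute() is
# attempted on every pass.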
while not self._stopevent.isSet():
try:
command, params = self.queue.get(True, 1)
except Queue.Empty:
pass
else:
try:
self.cmd_manager.insert(command, params)
except CommandError, err:
log.error(err)
try:
self.cmd_manager.execute()
except CommandError, err:
log.error(err)
|
mgedmin/ansible | refs/heads/devel | lib/ansible/modules/network/netvisor/pn_ospf.py | 29 | #!/usr/bin/python
""" PN-CLI vrouter-ospf-add/remove """
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import shlex
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = """
---
module: pn_ospf
author: "Pluribus Networks (@amitsi)"
version_added: "2.2"
version: 1.0
short_description: CLI command to add/remove ospf protocol to a vRouter.
description:
- Execute vrouter-ospf-add, vrouter-ospf-remove command.
- This command adds/removes Open Shortest Path First(OSPF) routing
protocol to a virtual router(vRouter) service.
options:
pn_cliusername:
description:
- Provide login username if user is not root.
required: False
pn_clipassword:
description:
- Provide login password if user is not root.
required: False
pn_cliswitch:
description:
- Target switch to run the CLI on.
required: False
state:
description:
- Assert the state of the ospf. Use 'present' to add ospf
and 'absent' to remove ospf.
required: True
default: present
choices: ['present', 'absent']
pn_vrouter_name:
description:
- Specify the name of the vRouter.
required: True
pn_network_ip:
description:
- Specify the network IP (IPv4 or IPv6) address.
required: True
pn_ospf_area:
description:
- Stub area number for the configuration. Required for vrouter-ospf-add.
"""
EXAMPLES = """
- name: "Add OSPF to vrouter"
pn_ospf:
state: present
pn_vrouter_name: name-string
pn_network_ip: 192.168.11.2/24
pn_ospf_area: 1.0.0.0
- name: "Remove OSPF from vrouter"
pn_ospf:
state: absent
pn_vrouter_name: name-string
"""
RETURN = """
command:
description: The CLI command run on the target node(s).
stdout:
description: The set of responses from the ospf command.
returned: always
type: list
stderr:
description: The set of error responses from the ospf command.
returned: on error
type: list
changed:
description: Indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
VROUTER_EXISTS = None
NETWORK_EXISTS = None
def pn_cli(module):
"""
This method generates the cli portion used to launch the Netvisor cli.
It parses the username, password, switch parameters from module.
:param module: The Ansible module to fetch username, password and switch
:return: returns the cli string for further processing
"""
username = module.params['pn_cliusername']
password = module.params['pn_clipassword']
cliswitch = module.params['pn_cliswitch']
if username and password:
cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password)
else:
cli = '/usr/bin/cli --quiet '
if cliswitch == 'local':
cli += ' switch-local '
else:
cli += ' switch ' + cliswitch
return cli
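# For example (illustrative): with no credentials and the default
# 'local' switch, this returns '/usr/bin/cli --quiet  switch-local '
# (the double space comes from the concatenation above).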
def check_cli(module, cli):
"""
This method checks if vRouter exists on the target node.
This method also checks for idempotency using the vrouter-ospf-show command.
If the given vRouter exists, return VROUTER_EXISTS as True else False.
If an OSPF network with the given ip exists on the given vRouter,
return NETWORK_EXISTS as True else False.
:param module: The Ansible module to fetch input parameters
:param cli: The CLI string
:return Global Booleans: VROUTER_EXISTS, NETWORK_EXISTS
"""
vrouter_name = module.params['pn_vrouter_name']
network_ip = module.params['pn_network_ip']
# Global flags
global VROUTER_EXISTS, NETWORK_EXISTS
# Check for vRouter
check_vrouter = cli + ' vrouter-show format name no-show-headers '
check_vrouter = shlex.split(check_vrouter)
out = module.run_command(check_vrouter)[1]
out = out.split()
if vrouter_name in out:
VROUTER_EXISTS = True
else:
VROUTER_EXISTS = False
# Check for OSPF networks
show = cli + ' vrouter-ospf-show vrouter-name %s ' % vrouter_name
show += 'format network no-show-headers'
show = shlex.split(show)
out = module.run_command(show)[1]
out = out.split()
if network_ip in out:
NETWORK_EXISTS = True
else:
NETWORK_EXISTS = False
def run_cli(module, cli):
"""
This method executes the cli command on the target node(s) and returns the
output. The module then exits based on the output.
:param cli: the complete cli string to be executed on the target node(s).
:param module: The Ansible module to fetch command
"""
cliswitch = module.params['pn_cliswitch']
state = module.params['state']
command = get_command_from_state(state)
cmd = shlex.split(cli)
result, out, err = module.run_command(cmd)
print_cli = cli.split(cliswitch)[1]
# Response in JSON format
if result != 0:
module.exit_json(
command=print_cli,
stderr=err.strip(),
msg="%s operation failed" % command,
changed=False
)
if out:
module.exit_json(
command=print_cli,
stdout=out.strip(),
msg="%s operation completed" % command,
changed=True
)
else:
module.exit_json(
command=print_cli,
msg="%s operation completed" % command,
changed=True
)
def get_command_from_state(state):
"""
This method gets appropriate command name for the state specified. It
returns the command name for the specified state.
:param state: The state for which the respective command name is required.
"""
command = None
if state == 'present':
command = 'vrouter-ospf-add'
if state == 'absent':
command = 'vrouter-ospf-remove'
return command
def main():
""" This section is for arguments parsing """
module = AnsibleModule(
argument_spec=dict(
pn_cliusername=dict(required=False, type='str'),
pn_clipassword=dict(required=False, type='str', no_log=True),
pn_cliswitch=dict(required=False, type='str', default='local'),
state=dict(type='str', default='present', choices=['present',
'absent']),
pn_vrouter_name=dict(required=True, type='str'),
pn_network_ip=dict(required=True, type='str'),
pn_ospf_area=dict(type='str')
),
required_if=(
['state', 'present',
['pn_network_ip', 'pn_ospf_area']],
['state', 'absent', ['pn_network_ip']]
)
)
# Accessing the arguments
state = module.params['state']
vrouter_name = module.params['pn_vrouter_name']
network_ip = module.params['pn_network_ip']
ospf_area = module.params['pn_ospf_area']
command = get_command_from_state(state)
# Building the CLI command string
cli = pn_cli(module)
check_cli(module, cli)
if state == 'present':
if VROUTER_EXISTS is False:
module.exit_json(
skipped=True,
msg='vRouter %s does not exist' % vrouter_name
)
if NETWORK_EXISTS is True:
module.exit_json(
skipped=True,
msg=('OSPF with network ip %s already exists on %s'
% (network_ip, vrouter_name))
)
cli += (' %s vrouter-name %s network %s ospf-area %s'
% (command, vrouter_name, network_ip, ospf_area))
if state == 'absent':
if VROUTER_EXISTS is False:
module.exit_json(
skipped=True,
msg='vRouter %s does not exist' % vrouter_name
)
if NETWORK_EXISTS is False:
module.exit_json(
skipped=True,
msg=('OSPF with network ip %s does not exist on %s'
% (network_ip, vrouter_name))
)
cli += (' %s vrouter-name %s network %s'
% (command, vrouter_name, network_ip))
run_cli(module, cli)
# AnsibleModule boilerplate
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
|
Evervolv/android_kernel_htc_msm7x30-3.0 | refs/heads/mm-6.0 | tools/perf/scripts/python/sctop.py | 11180 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
|
ron8hu/spark | refs/heads/master | examples/src/main/python/mllib/k_means_example.py | 123 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
# $example on$
from numpy import array
from math import sqrt
# $example off$
from pyspark import SparkContext
# $example on$
from pyspark.mllib.clustering import KMeans, KMeansModel
# $example off$
if __name__ == "__main__":
sc = SparkContext(appName="KMeansExample") # SparkContext
# $example on$
# Load and parse the data
data = sc.textFile("data/mllib/kmeans_data.txt")
parsedData = data.map(lambda line: array([float(x) for x in line.split(' ')]))
# Build the model (cluster the data)
clusters = KMeans.train(parsedData, 2, maxIterations=10, initializationMode="random")
# Evaluate clustering by computing Within Set Sum of Squared Errors
def error(point):
center = clusters.centers[clusters.predict(point)]
return sqrt(sum([x**2 for x in (point - center)]))
WSSSE = parsedData.map(lambda point: error(point)).reduce(lambda x, y: x + y)
print("Within Set Sum of Squared Error = " + str(WSSSE))
# Save and load model
clusters.save(sc, "target/org/apache/spark/PythonKMeansExample/KMeansModel")
sameModel = KMeansModel.load(sc, "target/org/apache/spark/PythonKMeansExample/KMeansModel")
# $example off$
sc.stop()
|
gautamMalu/rootfs_xen_arndale | refs/heads/master | usr/lib/python3/dist-packages/gi/_signalhelper.py | 6 | # -*- Mode: Python; py-indent-offset: 4 -*-
# pygobject - Python bindings for the GObject library
# Copyright (C) 2012 Simon Feltman
#
# gi/_signalhelper.py: GObject signal binding decorator object
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <http://www.gnu.org/licenses/>.
import sys
import inspect
from ._gi import _gobject
# Callable went away in python 3.0 and came back in 3.2.
# Use versioning to figure out when to define it, otherwise we have to deal with
# the complexity of using __builtin__ or builtin between python versions to
# check if callable exists which PyFlakes will also complain about.
if (3, 0) <= sys.version_info < (3, 2):
def callable(fn):
return hasattr(fn, '__call__')
class Signal(str):
"""Object which gives a nice API for creating and binding signals.
:param name:
Name of signal or callable closure when used as a decorator.
:type name: str or callable
:param callable func:
Callable closure method.
:param GObject.SignalFlags flags:
Flags specifying when to run closure.
:param type return_type:
Return type of the Signal.
:param list arg_types:
List of argument types specifying the signals function signature
:param str doc:
Documentation of signal object.
:param callable accumulator:
Accumulator method with the signature:
func(ihint, return_accu, handler_return, accu_data) -> boolean
:param object accu_data:
User data passed to the accumulator.
:Example:
.. code-block:: python
class Spam(GObject.Object):
velocity = 0
@GObject.Signal
def pushed(self):
self.velocity += 1
@GObject.Signal(flags=GObject.SignalFlags.RUN_LAST)
def pulled(self):
self.velocity -= 1
stomped = GObject.Signal('stomped', arg_types=(int,))
@GObject.Signal
def annotated_signal(self, a:int, b:str):
"Python3 annotation support for parameter types.
def on_pushed(obj):
print(obj)
spam = Spam()
spam.pushed.connect(on_pushed)
spam.pushed.emit()
"""
class BoundSignal(str):
"""
Temporary binding object which can be used for connecting signals
without specifying the signal name string to connect.
"""
def __new__(cls, name, *args, **kargs):
return str.__new__(cls, name)
def __init__(self, signal, gobj):
str.__init__(self)
self.signal = signal
self.gobj = gobj
def __repr__(self):
return 'BoundSignal("%s")' % self
def __call__(self, *args, **kargs):
"""Call the signals closure."""
return self.signal.func(self.gobj, *args, **kargs)
def connect(self, callback, *args, **kargs):
"""Same as GObject.Object.connect except there is no need to specify
the signal name."""
return self.gobj.connect(self, callback, *args, **kargs)
def connect_detailed(self, callback, detail, *args, **kargs):
"""Same as GObject.Object.connect except there is no need to specify
the signal name. In addition concats "::<detail>" to the signal name
when connecting; for use with notifications like "notify" when a property
changes.
"""
return self.gobj.connect(self + '::' + detail, callback, *args, **kargs)
def disconnect(self, handler_id):
"""Same as GObject.Object.disconnect."""
self.gobj.disconnect(handler_id)
def emit(self, *args, **kargs):
"""Same as GObject.Object.emit except there is no need to specify
the signal name."""
return self.gobj.emit(str(self), *args, **kargs)
def __new__(cls, name='', *args, **kargs):
if callable(name):
name = name.__name__
return str.__new__(cls, name)
def __init__(self, name='', func=None, flags=_gobject.SIGNAL_RUN_FIRST,
return_type=None, arg_types=None, doc='', accumulator=None, accu_data=None):
if func and not name:
name = func.__name__
elif callable(name):
func = name
name = func.__name__
if func and not doc:
doc = func.__doc__
str.__init__(self)
if func and not (return_type or arg_types):
return_type, arg_types = get_signal_annotations(func)
if arg_types is None:
arg_types = tuple()
self.func = func
self.flags = flags
self.return_type = return_type
self.arg_types = arg_types
self.__doc__ = doc
self.accumulator = accumulator
self.accu_data = accu_data
def __get__(self, instance, owner=None):
"""Returns a BoundSignal when accessed on an object instance."""
if instance is None:
return self
return self.BoundSignal(self, instance)
def __call__(self, obj, *args, **kargs):
"""Allows for instantiated Signals to be used as a decorator or calling
of the underlying signal method."""
# If obj is a GObject, then we call this signal as a closure, otherwise
# it is used as a re-application of a decorator.
if isinstance(obj, _gobject.GObject):
self.func(obj, *args, **kargs)
else:
# If self is already an allocated name, use it otherwise create a new named
# signal using the closure name as the name.
if str(self):
name = str(self)
else:
name = obj.__name__
# Return a new value of this type since it is based on an immutable string.
return type(self)(name=name, func=obj, flags=self.flags,
return_type=self.return_type, arg_types=self.arg_types,
doc=self.__doc__, accumulator=self.accumulator, accu_data=self.accu_data)
def copy(self, newName=None):
"""Returns a renamed copy of the Signal."""
if newName is None:
newName = str(self)
return type(self)(name=newName, func=self.func, flags=self.flags,
return_type=self.return_type, arg_types=self.arg_types,
doc=self.__doc__, accumulator=self.accumulator, accu_data=self.accu_data)
def get_signal_args(self):
"""Returns a tuple of: (flags, return_type, arg_types, accumulator, accu_data)"""
return (self.flags, self.return_type, self.arg_types, self.accumulator, self.accu_data)
class SignalOverride(Signal):
"""Specialized sub-class of Signal which can be used as a decorator for overriding
existing signals on GObjects.
:Example:
.. code-block:: python
class MyWidget(Gtk.Widget):
@GObject.SignalOverride
def configure_event(self):
pass
"""
def get_signal_args(self):
"""Returns the string 'override'."""
return 'override'
def get_signal_annotations(func):
"""Attempt pulling python 3 function annotations off of 'func' for
use as a signals type information. Returns an ordered nested tuple
of (return_type, (arg_type1, arg_type2, ...)). If the given function
does not have annotations then (None, tuple()) is returned.
"""
arg_types = tuple()
return_type = None
if hasattr(func, '__annotations__'):
spec = inspect.getfullargspec(func)
arg_types = tuple(spec.annotations[arg] for arg in spec.args
if arg in spec.annotations)
if 'return' in spec.annotations:
return_type = spec.annotations['return']
return return_type, arg_types
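# For instance (illustrative closure), given
#   def sized(self, width: int) -> bool: ...
# this returns (bool, (int,)); 'self' carries no annotation and is skipped.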
def install_signals(cls):
"""Adds Signal instances on a GObject derived class into the '__gsignals__'
dictionary to be picked up and registered as real GObject signals.
"""
gsignals = cls.__dict__.get('__gsignals__', {})
newsignals = {}
for name, signal in cls.__dict__.items():
if isinstance(signal, Signal):
signalName = str(signal)
# Fixup a signal which is unnamed by using the class variable name.
# Since Signal is based on string, which is immutable,
# we must copy and replace the class variable.
if not signalName:
signalName = name
signal = signal.copy(name)
setattr(cls, name, signal)
if signalName in gsignals:
raise ValueError('Signal "%s" has already been registered.' % name)
newsignals[signalName] = signal
gsignals[signalName] = signal.get_signal_args()
cls.__gsignals__ = gsignals
# Setup signal closures by adding the specially named
# method to the class in the form of "do_<signal_name>".
for name, signal in newsignals.items():
if signal.func is not None:
funcName = 'do_' + name.replace('-', '_')
if not hasattr(cls, funcName):
setattr(cls, funcName, signal.func)
|
TalShafir/ansible | refs/heads/devel | lib/ansible/modules/network/aci/aci_aep_to_domain.py | 15 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Dag Wieers <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: aci_aep_to_domain
short_description: Bind AEPs to Physical or Virtual Domains (infra:RsDomP)
description:
- Bind AEPs to Physical or Virtual Domains on Cisco ACI fabrics.
notes:
- The C(aep) and C(domain) parameters should exist before using this module.
The M(aci_aep) and M(aci_domain) can be used for these.
- More information about the internal APIC class B(infra:RsDomP) from
L(the APIC Management Information Model reference,https://developer.cisco.com/docs/apic-mim-ref/).
author:
- Dag Wieers (@dagwieers)
version_added: '2.5'
options:
aep:
description:
- The name of the Attachable Access Entity Profile.
aliases: [ aep_name ]
domain:
description:
- Name of the physical or virtual domain being associated with the AEP.
aliases: [ domain_name, domain_profile ]
domain_type:
description:
- Determines if the Domain is physical (phys) or virtual (vmm).
choices: [ fc, l2dom, l3dom, phys, vmm ]
aliases: [ type ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
vm_provider:
description:
- The VM platform for VMM Domains.
- Support for Kubernetes was added in ACI v3.0.
- Support for CloudFoundry, OpenShift and Red Hat was added in ACI v3.1.
choices: [ cloudfoundry, kubernetes, microsoft, openshift, openstack, redhat, vmware ]
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
- name: Add AEP to domain binding
aci_aep_to_domain: &binding_present
host: apic
username: admin
password: SomeSecretPassword
aep: test_aep
domain: phys_dom
domain_type: phys
state: present
delegate_to: localhost
- name: Remove AEP to domain binding
aci_aep_to_domain: &binding_absent
host: apic
username: admin
password: SomeSecretPassword
aep: test_aep
domain: phys_dom
domain_type: phys
state: absent
delegate_to: localhost
- name: Query our AEP to domain binding
aci_aep_to_domain:
host: apic
username: admin
password: SomeSecretPassword
aep: test_aep
domain: phys_dom
domain_type: phys
state: query
delegate_to: localhost
register: query_result
- name: Query all AEP to domain bindings
aci_aep_to_domain: &binding_query
host: apic
username: admin
password: SomeSecretPassword
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: string
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: string
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: string
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: string
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: string
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
VM_PROVIDER_MAPPING = dict(
cloudfoundry='CloudFoundry',
kubernetes='Kubernetes',
microsoft='Microsoft',
openshift='OpenShift',
openstack='OpenStack',
redhat='Redhat',
vmware='VMware',
)
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
aep=dict(type='str', aliases=['aep_name']), # Not required for querying all objects
domain=dict(type='str', aliases=['domain_name', 'domain_profile']), # Not required for querying all objects
domain_type=dict(type='str', choices=['fc', 'l2dom', 'l3dom', 'phys', 'vmm'], aliases=['type']), # Not required for querying all objects
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
vm_provider=dict(type='str', choices=['cloudfoundry', 'kubernetes', 'microsoft', 'openshift', 'openstack', 'redhat', 'vmware']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['domain_type', 'vmm', ['vm_provider']],
['state', 'absent', ['aep', 'domain', 'domain_type']],
['state', 'present', ['aep', 'domain', 'domain_type']],
],
required_together=[
['domain', 'domain_type'],
],
)
aep = module.params['aep']
domain = module.params['domain']
domain_type = module.params['domain_type']
vm_provider = module.params['vm_provider']
state = module.params['state']
# Report when vm_provider is set when type is not virtual
if domain_type != 'vmm' and vm_provider is not None:
module.fail_json(msg="Domain type '{0}' cannot have a 'vm_provider'".format(domain_type))
# Compile the full domain for URL building
if domain_type == 'fc':
domain_mo = 'uni/fc-{0}'.format(domain)
elif domain_type == 'l2dom':
domain_mo = 'uni/l2dom-{0}'.format(domain)
elif domain_type == 'l3dom':
domain_mo = 'uni/l3dom-{0}'.format(domain)
elif domain_type == 'phys':
domain_mo = 'uni/phys-{0}'.format(domain)
elif domain_type == 'vmm':
domain_mo = 'uni/vmmp-{0}/dom-{1}'.format(VM_PROVIDER_MAPPING[vm_provider], domain)
else:
domain_mo = None
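# e.g. domain_type 'vmm' with vm_provider 'vmware' and domain 'vmw_dom'
# (illustrative names) yields 'uni/vmmp-VMware/dom-vmw_dom'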
aci = ACIModule(module)
aci.construct_url(
root_class=dict(
aci_class='infraAttEntityP',
aci_rn='infra/attentp-{0}'.format(aep),
module_object=aep,
target_filter={'name': aep},
),
subclass_1=dict(
aci_class='infraRsDomP',
aci_rn='rsdomP-[{0}]'.format(domain_mo),
module_object=domain_mo,
target_filter={'tDn': domain_mo},
),
)
aci.get_existing()
if state == 'present':
aci.payload(
aci_class='infraRsDomP',
class_config=dict(tDn=domain_mo),
)
aci.get_diff(aci_class='infraRsDomP')
aci.post_config()
elif state == 'absent':
aci.delete_config()
aci.exit_json()
if __name__ == "__main__":
main()
|
tersmitten/ansible | refs/heads/devel | lib/ansible/parsing/yaml/__init__.py | 2520 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
|
JeremyViehland/bru | refs/heads/master | brulib/library.py | 3 | """ Helpers for accessing files in the ./library dir.
This dir has a $module/$version.bru+gyp structure, containing information
about how to download tar.gzs for (or how to clone) each module, as well
as how to build the module's libs and some of its tests/examples.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import re
import functools
import brulib.jsonc
def alphnumeric_lt(a, b):
""" helper func for module version comparison """
# from http://stackoverflow.com/questions/2669059/how-to-sort-alpha-numeric-set-in-python
def to_alphanumeric_pairs(text):
convert = lambda text: int(text) if text.isdigit() else text
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
return alphanum_key(text)
return to_alphanumeric_pairs(a) < to_alphanumeric_pairs(b)
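# e.g. alphnumeric_lt('1.2.9', '1.2.10') is True: the digit runs are
# compared as integers (9 < 10) rather than character by character.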
@functools.total_ordering
class ModuleVersion:
""" helper class for module version comparison """
def __init__(self, version_text):
self.version_text = version_text
def __lt__(self, other):
lhs = self .version_text
rhs = other.version_text
# module versions could be straightforward like 1.2.3, or they could be
# openssl-style mixtures of numbers & letters like 1.0.0f
return alphnumeric_lt(lhs, rhs)
class Library:
""" Gives access to content of ./library, getting information about modules
and versions
"""
def __init__(self, library_rootdir):
""" param library_rootdir e.g. './library' """
self._library_rootdir = library_rootdir
def get_root_dir(self):
""" return ctor param """
return self._library_rootdir
def get_module_dir(self, module_name):
""" get the dir containing the *.bru and other files for this module,
so return a subdir of get_root_dir().
"""
module_dir = os.path.join(self.get_root_dir(), module_name)
return module_dir
def _load_from_library(self, module_name, module_version, ext):
""" ext e.g. '.bru' or '.gyp' """
json_file_name = os.path.join(self.get_module_dir(module_name), module_version + ext)
jso = brulib.jsonc.loadfile(json_file_name)
return jso
def has_formula(self, module_name, module_version):
# lame impl: throw needless exception if module doesn't exist. Revise?
try:
self.load_formula(module_name, module_version)
return True
except:
return False
def load_formula(self, module_name, module_version):
""" E.g. to load recipe for module_name='zlib' module_version='1.2.8' """
# Recipes will be downloaded from some server some day (e.g. from github
# directly).
formula = self._load_from_library(module_name, module_version, '.bru')
assert formula['module'] == module_name and formula['version'] == module_version
return formula
def load_gyp(self, formula):
""" to load the gyp file associated with a formula """
gyp = self._load_from_library(formula['module'], formula['version'], '.gyp')
assert 'targets' in gyp # otherwise it's not a (or is an empty) gyp file
return gyp
def _save_to_library(self, formula, jso, ext):
""" param jso is the dict or OrderedDict to save, which can by the
forumula itself, or a gyp file, or ... """
module_version = formula['version']
module_dir = self.get_module_dir(formula['module'])
file_name = os.path.join(module_dir, module_version + ext)
brulib.jsonc.savefile(file_name, jso)
#print("not modifying existing " + bru_file_name)
def save_formula(self, formula):
""" param formula is the same dict as returned by load_formula,
so should be an OrderedDict.
"""
self._save_to_library(formula, formula, '.bru')
def save_gyp(self, formula, gyp):
""" param is a dict representing gyp file content """
self._save_to_library(formula, gyp, '.gyp')
def get_all_versions(self, module):
""" yield all known versions of a module """
bru_file_names = os.listdir(self.get_module_dir(module))
regex = re.compile('^(.+)\\.bru$') # version can be 1.2.3 or 1.2rc7 or ...
for bru_file_name in bru_file_names:
match = regex.match(bru_file_name)
if match != None:
version = match.group(1)
yield version
def get_latest_version_of(self, module):
""" return the latest version of a module using alphanumeric comparison
of version strings. So this works fine for versions like '3.2.1'
but not as well when comparing '3.2.1rc1' with '3.2.1beta7'
"""
versions = self.get_all_versions(module)
return max((ModuleVersion(version_text) for version_text in versions)).version_text
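# Caveat example: between '2.0' and '2.0rc1' this picks '2.0rc1' (the
# extra 'rc1' chunks make it compare as larger), even though 2.0 is
# the later release.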
|
GunoH/intellij-community | refs/heads/master | python/testData/codeInsight/smartEnter/multilineSetLiteral.py | 10 | xs = {
1<caret>
} |
CMPUT404F16T04/sturdy-octo-computing-machine | refs/heads/develop | mysite/manage.py | 709 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mysite.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
fieldaware/optimoroute | refs/heads/master | tests/test_encoders.py | 1 | # -*- coding: utf-8 -*-
import datetime
import json
from decimal import Decimal
from optimo.util import CoreOptimoEncoder
def test_coreoptimoencoder():
dt = datetime.datetime(year=2014, month=12, day=5, hour=8, minute=0)
dec = Decimal('4.5')
d = {'datetime': dt, 'a_decimal': dec, 'integer': 5}
assert json.dumps(d, cls=CoreOptimoEncoder) == \
'{"a_decimal": 4.5, "integer": 5, "datetime": "2014-12-05T08:00"}'
|
danielreed/python-hpOneView | refs/heads/master | examples/scripts/get-restores.py | 2 | #!/usr/bin/env python
###
# (C) Copyright (2012-2015) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from builtins import range
from future import standard_library
standard_library.install_aliases()
import sys
PYTHON_VERSION = sys.version_info[:3]
PY2 = (PYTHON_VERSION[0] == 2)
if PY2:
if PYTHON_VERSION < (2, 7, 9):
raise Exception('Must use Python 2.7.9 or later')
elif PYTHON_VERSION < (3, 4):
raise Exception('Must use Python 3.4 or later')
from pprint import pprint
import hpOneView as hpov
def acceptEULA(con):
# See if we need to accept the EULA before we try to log in
con.get_eula_status()
try:
if con.get_eula_status() is True:
print('EULA display needed')
con.set_eula('no')
except Exception as e:
print('EXCEPTION:')
print(e)
def login(con, credential):
# Log in with the given credentials
try:
con.login(credential)
except:
print('Login failed')
def getrestores(sts):
ret = sts.get_restores()
pprint(ret)
def main():
parser = argparse.ArgumentParser(add_help=True,
formatter_class=argparse.RawTextHelpFormatter,
description='''
Display Restores
Usage: ''')
parser.add_argument('-a', dest='host', required=True,
help='''
HPE OneView Appliance hostname or IP address''')
parser.add_argument('-u', dest='user', required=False,
default='Administrator',
help='''
HPE OneView Username''')
parser.add_argument('-p', dest='passwd', required=True,
help='''
HPE OneView Password''')
parser.add_argument('-c', dest='cert', required=False,
help='''
Trusted SSL Certificate Bundle in PEM (Base64 Encoded DER) Format''')
parser.add_argument('-y', dest='proxy', required=False,
help='''
Proxy (host:port format''')
parser.add_argument('-j', dest='domain', required=False,
default='Local',
help='''
HPE OneView Authorized Login Domain''')
args = parser.parse_args()
credential = {'authLoginDomain': args.domain.upper(), 'userName': args.user, 'password': args.passwd}
con = hpov.connection(args.host)
srv = hpov.servers(con)
net = hpov.networking(con)
sts = hpov.settings(con)
if args.proxy:
con.set_proxy(args.proxy.split(':')[0], args.proxy.split(':')[1])
if args.cert:
con.set_trusted_ssl_bundle(args.cert)
login(con, credential)
acceptEULA(con)
getrestores(sts)
if __name__ == '__main__':
import sys
import argparse
sys.exit(main())
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
|
thaleslima/WS-pedido-venda | refs/heads/master | pedidovenda/admin.py | 1 | from django.contrib import admin
# Register your models here.
from pedidovenda.models import Categoria
from pedidovenda.models import Produto
from pedidovenda.models import Pedido
from pedidovenda.models import ItemPedido
from pedidovenda.models import Mesa
from pedidovenda.models import Usuario
class CategoriaAdmin(admin.ModelAdmin):
list_display = ('id', 'descricao')
class ProdutoAdmin(admin.ModelAdmin):
list_display = ('id', 'descricao', 'valor', 'categoria')
def get_name(self, obj):
return obj.categoria.descricao
get_name.admin_order_field = 'categoria' #Allows column order sorting
get_name.short_description = 'Categoria' #Renames column head
class MesaAdmin(admin.ModelAdmin):
list_display = ('id', 'descricao', 'status', 'tipo')
class PedidoAdmin(admin.ModelAdmin):
list_display = ('id', 'valorTotal', 'codigoAtendente', 'status')
class ItemPedidoAdmin(admin.ModelAdmin):
list_display = ('id', 'quantidade', 'observacao', 'status', 'valorUnit', 'valorTotalItem')
class UsuarioAdmin(admin.ModelAdmin):
list_display = ('id', 'nome', 'email', 'login', 'senha')
admin.site.register(Produto,ProdutoAdmin)
admin.site.register(Pedido,PedidoAdmin)
admin.site.register(ItemPedido,ItemPedidoAdmin)
admin.site.register(Categoria, CategoriaAdmin)
admin.site.register(Mesa, MesaAdmin)
admin.site.register(Usuario, UsuarioAdmin) |
sarlalian/ansible | refs/heads/devel | lib/ansible/plugins/shell/csh.py | 92 | # (c) 2014, Chris Church <chris@ninemoreminutes.com>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.shell.sh import ShellModule as ShModule
class ShellModule(ShModule):
# How to end lines in a python script one-liner
_SHELL_EMBEDDED_PY_EOL = '\\\n'
_SHELL_REDIRECT_ALLNULL = '>& /dev/null'
def env_prefix(self, **kwargs):
return 'env %s' % super(ShellModule, self).env_prefix(**kwargs)
|
t11e/django | refs/heads/master | django/contrib/gis/geos/tests/test_mutable_list.py | 75 | # Copyright (c) 2008-2009 Aryeh Leib Taurog, http://www.aryehleib.com
# All rights reserved.
#
# Modified from original contribution by Aryeh Leib Taurog, which was
# released under the New BSD license.
import unittest
from django.contrib.gis.geos.mutable_list import ListMixin
class UserListA(ListMixin):
_mytype = tuple
def __init__(self, i_list, *args, **kwargs):
self._list = self._mytype(i_list)
super(UserListA, self).__init__(*args, **kwargs)
def __len__(self): return len(self._list)
def __str__(self): return str(self._list)
def __repr__(self): return repr(self._list)
def _set_list(self, length, items):
# this would work:
# self._list = self._mytype(items)
# but then we wouldn't be testing length parameter
itemList = ['x'] * length
for i, v in enumerate(items):
itemList[i] = v
self._list = self._mytype(itemList)
def _get_single_external(self, index):
return self._list[index]
class UserListB(UserListA):
_mytype = list
def _set_single(self, index, value):
self._list[index] = value
def nextRange(length):
nextRange.start += 100
return range(nextRange.start, nextRange.start + length)
nextRange.start = 0
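# nextRange keeps its counter as a function attribute, so successive calls yield
# disjoint slices: nextRange(3) -> [100, 101, 102], then nextRange(2) -> [200, 201]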
class ListMixinTest(unittest.TestCase):
"""
Tests base class ListMixin by comparing a list clone which is
a ListMixin subclass with a real Python list.
"""
limit = 3
listType = UserListA
def lists_of_len(self, length=None):
if length is None: length = self.limit
pl = range(length)
return pl, self.listType(pl)
def limits_plus(self, b):
return range(-self.limit - b, self.limit + b)
def step_range(self):
return range(-1 - self.limit, 0) + range(1, 1 + self.limit)
def test01_getslice(self):
'Slice retrieval'
pl, ul = self.lists_of_len()
for i in self.limits_plus(1):
self.assertEqual(pl[i:], ul[i:], 'slice [%d:]' % (i))
self.assertEqual(pl[:i], ul[:i], 'slice [:%d]' % (i))
for j in self.limits_plus(1):
self.assertEqual(pl[i:j], ul[i:j], 'slice [%d:%d]' % (i,j))
for k in self.step_range():
self.assertEqual(pl[i:j:k], ul[i:j:k], 'slice [%d:%d:%d]' % (i,j,k))
for k in self.step_range():
self.assertEqual(pl[i::k], ul[i::k], 'slice [%d::%d]' % (i,k))
self.assertEqual(pl[:i:k], ul[:i:k], 'slice [:%d:%d]' % (i,k))
for k in self.step_range():
self.assertEqual(pl[::k], ul[::k], 'slice [::%d]' % (k))
def test02_setslice(self):
'Slice assignment'
def setfcn(x,i,j,k,L): x[i:j:k] = range(L)
pl, ul = self.lists_of_len()
for slen in range(self.limit + 1):
ssl = nextRange(slen)
ul[:] = ssl
pl[:] = ssl
self.assertEqual(pl, ul[:], 'set slice [:]')
for i in self.limits_plus(1):
ssl = nextRange(slen)
ul[i:] = ssl
pl[i:] = ssl
self.assertEqual(pl, ul[:], 'set slice [%d:]' % (i))
ssl = nextRange(slen)
ul[:i] = ssl
pl[:i] = ssl
self.assertEqual(pl, ul[:], 'set slice [:%d]' % (i))
for j in self.limits_plus(1):
ssl = nextRange(slen)
ul[i:j] = ssl
pl[i:j] = ssl
self.assertEqual(pl, ul[:], 'set slice [%d:%d]' % (i, j))
for k in self.step_range():
ssl = nextRange( len(ul[i:j:k]) )
ul[i:j:k] = ssl
pl[i:j:k] = ssl
self.assertEqual(pl, ul[:], 'set slice [%d:%d:%d]' % (i, j, k))
sliceLen = len(ul[i:j:k])
self.assertRaises(ValueError, setfcn, ul, i, j, k, sliceLen + 1)
if sliceLen > 2:
self.assertRaises(ValueError, setfcn, ul, i, j, k, sliceLen - 1)
for k in self.step_range():
ssl = nextRange( len(ul[i::k]) )
ul[i::k] = ssl
pl[i::k] = ssl
self.assertEqual(pl, ul[:], 'set slice [%d::%d]' % (i, k))
ssl = nextRange( len(ul[:i:k]) )
ul[:i:k] = ssl
pl[:i:k] = ssl
self.assertEqual(pl, ul[:], 'set slice [:%d:%d]' % (i, k))
for k in self.step_range():
ssl = nextRange(len(ul[::k]))
ul[::k] = ssl
pl[::k] = ssl
self.assertEqual(pl, ul[:], 'set slice [::%d]' % (k))
def test03_delslice(self):
'Delete slice'
for Len in range(self.limit):
pl, ul = self.lists_of_len(Len)
del pl[:]
del ul[:]
self.assertEqual(pl[:], ul[:], 'del slice [:]')
for i in range(-Len - 1, Len + 1):
pl, ul = self.lists_of_len(Len)
del pl[i:]
del ul[i:]
self.assertEqual(pl[:], ul[:], 'del slice [%d:]' % (i))
pl, ul = self.lists_of_len(Len)
del pl[:i]
del ul[:i]
self.assertEqual(pl[:], ul[:], 'del slice [:%d]' % (i))
for j in range(-Len - 1, Len + 1):
pl, ul = self.lists_of_len(Len)
del pl[i:j]
del ul[i:j]
self.assertEqual(pl[:], ul[:], 'del slice [%d:%d]' % (i,j))
for k in range(-Len - 1,0) + range(1,Len):
pl, ul = self.lists_of_len(Len)
del pl[i:j:k]
del ul[i:j:k]
self.assertEqual(pl[:], ul[:], 'del slice [%d:%d:%d]' % (i,j,k))
for k in range(-Len - 1,0) + range(1,Len):
pl, ul = self.lists_of_len(Len)
del pl[:i:k]
del ul[:i:k]
self.assertEqual(pl[:], ul[:], 'del slice [:%d:%d]' % (i,k))
pl, ul = self.lists_of_len(Len)
del pl[i::k]
del ul[i::k]
self.assertEqual(pl[:], ul[:], 'del slice [%d::%d]' % (i,k))
for k in range(-Len - 1,0) + range(1,Len):
pl, ul = self.lists_of_len(Len)
del pl[::k]
del ul[::k]
self.assertEqual(pl[:], ul[:], 'del slice [::%d]' % (k))
def test04_get_set_del_single(self):
'Get/set/delete single item'
pl, ul = self.lists_of_len()
for i in self.limits_plus(0):
self.assertEqual(pl[i], ul[i], 'get single item [%d]' % i)
for i in self.limits_plus(0):
pl, ul = self.lists_of_len()
pl[i] = 100
ul[i] = 100
self.assertEqual(pl[:], ul[:], 'set single item [%d]' % i)
for i in self.limits_plus(0):
pl, ul = self.lists_of_len()
del pl[i]
del ul[i]
self.assertEqual(pl[:], ul[:], 'del single item [%d]' % i)
def test05_out_of_range_exceptions(self):
'Out of range exceptions'
def setfcn(x, i): x[i] = 20
def getfcn(x, i): return x[i]
def delfcn(x, i): del x[i]
pl, ul = self.lists_of_len()
for i in (-1 - self.limit, self.limit):
self.assertRaises(IndexError, setfcn, ul, i) # 'set index %d' % i)
self.assertRaises(IndexError, getfcn, ul, i) # 'get index %d' % i)
self.assertRaises(IndexError, delfcn, ul, i) # 'del index %d' % i)
def test06_list_methods(self):
'List methods'
pl, ul = self.lists_of_len()
pl.append(40)
ul.append(40)
self.assertEqual(pl[:], ul[:], 'append')
pl.extend(range(50,55))
ul.extend(range(50,55))
self.assertEqual(pl[:], ul[:], 'extend')
pl.reverse()
ul.reverse()
self.assertEqual(pl[:], ul[:], 'reverse')
for i in self.limits_plus(1):
pl, ul = self.lists_of_len()
pl.insert(i,50)
ul.insert(i,50)
self.assertEqual(pl[:], ul[:], 'insert at %d' % i)
for i in self.limits_plus(0):
pl, ul = self.lists_of_len()
self.assertEqual(pl.pop(i), ul.pop(i), 'popped value at %d' % i)
self.assertEqual(pl[:], ul[:], 'after pop at %d' % i)
pl, ul = self.lists_of_len()
        self.assertEqual(pl.pop(), ul.pop(), 'popped value')
self.assertEqual(pl[:], ul[:], 'after pop')
pl, ul = self.lists_of_len()
def popfcn(x, i): x.pop(i)
self.assertRaises(IndexError, popfcn, ul, self.limit)
self.assertRaises(IndexError, popfcn, ul, -1 - self.limit)
pl, ul = self.lists_of_len()
for val in range(self.limit):
self.assertEqual(pl.index(val), ul.index(val), 'index of %d' % val)
for val in self.limits_plus(2):
self.assertEqual(pl.count(val), ul.count(val), 'count %d' % val)
for val in range(self.limit):
pl, ul = self.lists_of_len()
pl.remove(val)
ul.remove(val)
self.assertEqual(pl[:], ul[:], 'after remove val %d' % val)
def indexfcn(x, v): return x.index(v)
def removefcn(x, v): return x.remove(v)
self.assertRaises(ValueError, indexfcn, ul, 40)
self.assertRaises(ValueError, removefcn, ul, 40)
def test07_allowed_types(self):
'Type-restricted list'
pl, ul = self.lists_of_len()
ul._allowed = (int, long)
ul[1] = 50
ul[:2] = [60, 70, 80]
def setfcn(x, i, v): x[i] = v
self.assertRaises(TypeError, setfcn, ul, 2, 'hello')
self.assertRaises(TypeError, setfcn, ul, slice(0,3,2), ('hello','goodbye'))
def test08_min_length(self):
'Length limits'
pl, ul = self.lists_of_len()
ul._minlength = 1
def delfcn(x,i): del x[:i]
def setfcn(x,i): x[:i] = []
for i in range(self.limit - ul._minlength + 1, self.limit + 1):
self.assertRaises(ValueError, delfcn, ul, i)
self.assertRaises(ValueError, setfcn, ul, i)
del ul[:ul._minlength]
ul._maxlength = 4
for i in range(0, ul._maxlength - len(ul)):
ul.append(i)
self.assertRaises(ValueError, ul.append, 10)
def test09_iterable_check(self):
'Error on assigning non-iterable to slice'
pl, ul = self.lists_of_len(self.limit + 1)
def setfcn(x, i, v): x[i] = v
self.assertRaises(TypeError, setfcn, ul, slice(0,3,2), 2)
def test10_checkindex(self):
'Index check'
pl, ul = self.lists_of_len()
for i in self.limits_plus(0):
if i < 0:
self.assertEqual(ul._checkindex(i), i + self.limit, '_checkindex(neg index)')
else:
self.assertEqual(ul._checkindex(i), i, '_checkindex(pos index)')
for i in (-self.limit - 1, self.limit):
self.assertRaises(IndexError, ul._checkindex, i)
ul._IndexError = TypeError
self.assertRaises(TypeError, ul._checkindex, -self.limit - 1)
def test_11_sorting(self):
'Sorting'
pl, ul = self.lists_of_len()
pl.insert(0, pl.pop())
ul.insert(0, ul.pop())
pl.sort()
ul.sort()
self.assertEqual(pl[:], ul[:], 'sort')
mid = pl[len(pl) / 2]
pl.sort(key=lambda x: (mid-x)**2)
ul.sort(key=lambda x: (mid-x)**2)
self.assertEqual(pl[:], ul[:], 'sort w/ key')
pl.insert(0, pl.pop())
ul.insert(0, ul.pop())
pl.sort(reverse=True)
ul.sort(reverse=True)
self.assertEqual(pl[:], ul[:], 'sort w/ reverse')
mid = pl[len(pl) / 2]
pl.sort(key=lambda x: (mid-x)**2)
ul.sort(key=lambda x: (mid-x)**2)
self.assertEqual(pl[:], ul[:], 'sort w/ key')
def test_12_arithmetic(self):
'Arithmetic'
pl, ul = self.lists_of_len()
al = range(10,14)
self.assertEqual(list(pl + al), list(ul + al), 'add')
self.assertEqual(type(ul), type(ul + al), 'type of add result')
self.assertEqual(list(al + pl), list(al + ul), 'radd')
self.assertEqual(type(al), type(al + ul), 'type of radd result')
objid = id(ul)
pl += al
ul += al
self.assertEqual(pl[:], ul[:], 'in-place add')
self.assertEqual(objid, id(ul), 'in-place add id')
for n in (-1,0,1,3):
pl, ul = self.lists_of_len()
self.assertEqual(list(pl * n), list(ul * n), 'mul by %d' % n)
self.assertEqual(type(ul), type(ul * n), 'type of mul by %d result' % n)
self.assertEqual(list(n * pl), list(n * ul), 'rmul by %d' % n)
self.assertEqual(type(ul), type(n * ul), 'type of rmul by %d result' % n)
objid = id(ul)
pl *= n
ul *= n
self.assertEqual(pl[:], ul[:], 'in-place mul by %d' % n)
self.assertEqual(objid, id(ul), 'in-place mul by %d id' % n)
pl, ul = self.lists_of_len()
self.assertEqual(pl, ul, 'cmp for equal')
self.assert_(pl >= ul, 'cmp for gte self')
self.assert_(pl <= ul, 'cmp for lte self')
self.assert_(ul >= pl, 'cmp for self gte')
self.assert_(ul <= pl, 'cmp for self lte')
self.assert_(pl + [5] > ul, 'cmp')
self.assert_(pl + [5] >= ul, 'cmp')
self.assert_(pl < ul + [2], 'cmp')
self.assert_(pl <= ul + [2], 'cmp')
self.assert_(ul + [5] > pl, 'cmp')
self.assert_(ul + [5] >= pl, 'cmp')
self.assert_(ul < pl + [2], 'cmp')
self.assert_(ul <= pl + [2], 'cmp')
pl[1] = 20
self.assert_(pl > ul, 'cmp for gt self')
self.assert_(ul < pl, 'cmp for self lt')
pl[1] = -20
self.assert_(pl < ul, 'cmp for lt self')
self.assert_(pl < ul, 'cmp for lt self')
class ListMixinTestSingle(ListMixinTest):
listType = UserListB
def suite():
s = unittest.TestSuite()
s.addTest(unittest.makeSuite(ListMixinTest))
s.addTest(unittest.makeSuite(ListMixinTestSingle))
return s
def run(verbosity=2):
unittest.TextTestRunner(verbosity=verbosity).run(suite())
if __name__ == '__main__':
run()
|
mathstuf/bugwarrior | refs/heads/develop | bugwarrior/services/githubutils.py | 2 | """ Tools for querying github.
I tried using pygithub3, but it really sucks.
"""
import requests
def _link_field_to_dict(field):
""" Utility for ripping apart github's Link header field.
It's kind of ugly.
"""
if not field:
return dict()
return dict([
(
part.split('; ')[1][5:-1],
part.split('; ')[0][1:-1],
) for part in field.split(', ')
])
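# For illustration, a header such as
#   '<https://api.github.com/user/repos?page=2>; rel="next"'
# parses to {'next': 'https://api.github.com/user/repos?page=2'}.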
def get_repos(username, auth):
""" username should be a string
auth should be a tuple of username and password.
"""
tmpl = "https://api.github.com/users/{username}/repos?per_page=100"
url = tmpl.format(username=username)
return _getter(url, auth)
def get_issues(username, repo, auth):
""" username and repo should be strings
auth should be a tuple of username and password.
"""
tmpl = "https://api.github.com/repos/{username}/{repo}/issues?per_page=100"
url = tmpl.format(username=username, repo=repo)
return _getter(url, auth)
def get_directly_assigned_issues(auth):
""" Returns all issues assigned to authenticated user.
This will return all issues assigned to the authenticated user
regardless of whether the user owns the repositories in which the
issues exist.
"""
url = "https://api.github.com/user/issues?per_page=100"
return _getter(url, auth)
def get_comments(username, repo, number, auth):
tmpl = "https://api.github.com/repos/{username}/{repo}/issues/" + \
"{number}/comments?per_page=100"
url = tmpl.format(username=username, repo=repo, number=number)
return _getter(url, auth)
def get_pulls(username, repo, auth):
""" username and repo should be strings
auth should be a tuple of username and password.
"""
tmpl = "https://api.github.com/repos/{username}/{repo}/pulls?per_page=100"
url = tmpl.format(username=username, repo=repo)
return _getter(url, auth)
def _getter(url, auth):
""" Pagination utility. Obnoxious. """
results = []
link = dict(next=url)
while 'next' in link:
response = requests.get(link['next'], auth=auth)
# And.. if we didn't get good results, just bail.
if response.status_code != 200:
raise IOError(
"Non-200 status code %r; %r; %r" % (
response.status_code, url, response.json))
if callable(response.json):
# Newer python-requests
results += response.json()
else:
# Older python-requests
results += response.json
link = _link_field_to_dict(response.headers.get('link', None))
return results
if __name__ == '__main__':
# Little test.
import getpass
username = raw_input("GitHub Username: ")
password = getpass.getpass()
    results = get_repos(username, (username, password))
print len(results), "repos found."
|
sankroh/satchmo | refs/heads/master | satchmo/payment/modules/cod/processor.py | 2 | """
Handle a cash-on-delivery payment.
"""
from django.utils.translation import ugettext as _
from satchmo.payment.utils import record_payment
class PaymentProcessor(object):
def __init__(self, settings):
self.settings = settings
def prepareData(self, order):
self.order = order
def process(self):
"""
COD is always successful.
"""
reason_code = "0"
response_text = _("Success")
record_payment(self.order, self.settings, amount=self.order.balance)
return (True, reason_code, response_text)
|
plumgrid/plumgrid-nova | refs/heads/master | nova/cmd/novnc.py | 12 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
opts = [
cfg.BoolOpt('record',
default=False,
help='Record sessions to FILE.[session_number]'),
cfg.BoolOpt('daemon',
default=False,
help='Become a daemon (background process)'),
cfg.BoolOpt('ssl_only',
default=False,
help='Disallow non-encrypted connections'),
cfg.BoolOpt('source_is_ipv6',
default=False,
help='Source is ipv6'),
cfg.StrOpt('cert',
default='self.pem',
help='SSL certificate file'),
cfg.StrOpt('key',
help='SSL key file (if separate from cert)'),
cfg.StrOpt('web',
default='/usr/share/spice-html5',
help='Run webserver on same port. Serve files from DIR.'),
]
cfg.CONF.register_cli_opts(opts)
|
mozilla/make.mozilla.org | refs/heads/master | vendor-local/lib/python/kombu/transport/virtual/exchange.py | 13 | """
kombu.transport.virtual.exchange
================================
Implementations of the standard exchanges defined
by the AMQ protocol (excluding the `headers` exchange).
:copyright: (c) 2009 - 2012 by Ask Solem.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import re
class ExchangeType(object):
"""Implements the specifics for an exchange type.
:param channel: AMQ Channel
"""
type = None
def __init__(self, channel):
self.channel = channel
def lookup(self, table, exchange, routing_key, default):
"""Lookup all queues matching `routing_key` in `exchange`.
:returns: `default` if no queues matched.
"""
raise NotImplementedError("subclass responsibility")
def prepare_bind(self, queue, exchange, routing_key, arguments):
"""Returns tuple of `(routing_key, regex, queue)` to be stored
for bindings to this exchange."""
return routing_key, None, queue
def equivalent(self, prev, exchange, type, durable, auto_delete,
arguments):
"""Returns true if `prev` and `exchange` is equivalent."""
return (type == prev["type"] and
durable == prev["durable"] and
auto_delete == prev["auto_delete"] and
(arguments or {}) == (prev["arguments"] or {}))
class DirectExchange(ExchangeType):
"""The `direct` exchange routes based on exact routing keys."""
type = "direct"
def lookup(self, table, exchange, routing_key, default):
return [queue for rkey, _, queue in table
if rkey == routing_key]
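    # e.g. with table [("video", None, "q1"), ("image", None, "q2")],
    # lookup(table, "media", "video", None) returns ["q1"]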
def deliver(self, message, exchange, routing_key, **kwargs):
_lookup = self.channel._lookup
_put = self.channel._put
for queue in _lookup(exchange, routing_key):
_put(queue, message, **kwargs)
class TopicExchange(ExchangeType):
"""The `topic` exchange routes messages based on words separated by
dots, using wildcard characters ``*`` (any single word), and ``#``
    (zero or more words)."""
type = "topic"
#: map of wildcard to regex conversions
wildcards = {"*": r".*?[^\.]",
"#": r".*?"}
#: compiled regex cache
_compiled = {}
def lookup(self, table, exchange, routing_key, default):
return [queue for rkey, pattern, queue in table
if self._match(pattern, routing_key)]
def deliver(self, message, exchange, routing_key, **kwargs):
_lookup = self.channel._lookup
_put = self.channel._put
deadletter = self.channel.deadletter_queue
for queue in [q for q in _lookup(exchange, routing_key)
if q and q != deadletter]:
_put(queue, message, **kwargs)
def prepare_bind(self, queue, exchange, routing_key, arguments):
return routing_key, self.key_to_pattern(routing_key), queue
def key_to_pattern(self, rkey):
"""Get the corresponding regex for any routing key."""
return "^%s$" % ("\.".join(self.wildcards.get(word, word)
for word in rkey.split(".")))
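    # e.g. key_to_pattern("stock.#") yields r"^stock\..*?$", which _match
    # then compiles and caches below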
def _match(self, pattern, string):
"""Same as :func:`re.match`, except the regex is compiled and cached,
then reused on subsequent matches with the same pattern."""
try:
compiled = self._compiled[pattern]
except KeyError:
compiled = self._compiled[pattern] = re.compile(pattern, re.U)
return compiled.match(string)
class FanoutExchange(ExchangeType):
"""The `fanout` exchange implements broadcast messaging by delivering
    copies of all messages to all queues bound to the exchange.
To support fanout the virtual channel needs to store the table
as shared state. This requires that the `Channel.supports_fanout`
attribute is set to true, and the `Channel._queue_bind` and
`Channel.get_table` methods are implemented. See the redis backend
for an example implementation of these methods.
"""
type = "fanout"
def lookup(self, table, exchange, routing_key, default):
return [queue for _, _, queue in table]
def deliver(self, message, exchange, routing_key, **kwargs):
if self.channel.supports_fanout:
self.channel._put_fanout(exchange, message, **kwargs)
#: Map of standard exchange types and corresponding classes.
STANDARD_EXCHANGE_TYPES = {"direct": DirectExchange,
"topic": TopicExchange,
"fanout": FanoutExchange}
|
srimai/odoo | refs/heads/8.0 | addons/document/__init__.py | 434 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import content_index
import std_index
import document
import report
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
weleen/mxnet | refs/heads/master | example/caffe/caffe_net.py | 18 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
from data import get_iterator
import argparse
import train_model
def get_mlp():
"""
multi-layer perceptron
"""
data = mx.symbol.Variable('data')
fc1 = mx.symbol.CaffeOp(data_0=data, num_weight=2, name='fc1', prototxt="layer{type:\"InnerProduct\" inner_product_param{num_output: 128} }")
act1 = mx.symbol.CaffeOp(data_0=fc1, prototxt="layer{type:\"TanH\"}")
fc2 = mx.symbol.CaffeOp(data_0=act1, num_weight=2, name='fc2', prototxt="layer{type:\"InnerProduct\" inner_product_param{num_output: 64} }")
act2 = mx.symbol.CaffeOp(data_0=fc2, prototxt="layer{type:\"TanH\"}")
fc3 = mx.symbol.CaffeOp(data_0=act2, num_weight=2, name='fc3', prototxt="layer{type:\"InnerProduct\" inner_product_param{num_output: 10}}")
if use_caffe_loss:
label = mx.symbol.Variable('softmax_label')
mlp = mx.symbol.CaffeLoss(data=fc3, label=label, grad_scale=1, name='softmax', prototxt="layer{type:\"SoftmaxWithLoss\"}")
else:
mlp = mx.symbol.SoftmaxOutput(data=fc3, name='softmax')
return mlp
def get_lenet():
"""
LeCun, Yann, Leon Bottou, Yoshua Bengio, and Patrick
Haffner. "Gradient-based learning applied to document recognition."
Proceedings of the IEEE (1998)
"""
data = mx.symbol.Variable('data')
# first conv
conv1 = mx.symbol.CaffeOp(data_0=data, num_weight=2, prototxt="layer{type:\"Convolution\" convolution_param { num_output: 20 kernel_size: 5 stride: 1} }")
act1 = mx.symbol.CaffeOp(data_0=conv1, prototxt="layer{type:\"TanH\"}")
pool1 = mx.symbol.CaffeOp(data_0=act1, prototxt="layer{type:\"Pooling\" pooling_param { pool: MAX kernel_size: 2 stride: 2}}")
# second conv
conv2 = mx.symbol.CaffeOp(data_0=pool1, num_weight=2, prototxt="layer{type:\"Convolution\" convolution_param { num_output: 50 kernel_size: 5 stride: 1} }")
act2 = mx.symbol.CaffeOp(data_0=conv2, prototxt="layer{type:\"TanH\"}")
pool2 = mx.symbol.CaffeOp(data_0=act2, prototxt="layer{type:\"Pooling\" pooling_param { pool: MAX kernel_size: 2 stride: 2}}")
fc1 = mx.symbol.CaffeOp(data_0=pool2, num_weight=2, prototxt="layer{type:\"InnerProduct\" inner_product_param{num_output: 500} }")
act3 = mx.symbol.CaffeOp(data_0=fc1, prototxt="layer{type:\"TanH\"}")
# second fullc
fc2 = mx.symbol.CaffeOp(data_0=act3, num_weight=2, prototxt="layer{type:\"InnerProduct\"inner_product_param{num_output: 10} }")
if use_caffe_loss:
label = mx.symbol.Variable('softmax_label')
lenet = mx.symbol.CaffeLoss(data=fc2, label=label, grad_scale=1, name='softmax', prototxt="layer{type:\"SoftmaxWithLoss\"}")
else:
lenet = mx.symbol.SoftmaxOutput(data=fc2, name='softmax')
return lenet
def get_network_from_json_file(file_name):
network = mx.sym.load(file_name)
return network
def parse_args():
parser = argparse.ArgumentParser(description='train an image classifier on mnist')
parser.add_argument('--network', type=str, default='lenet',
help='the cnn to use (mlp | lenet | <path to network json file>')
parser.add_argument('--caffe-loss', type=int, default=0,
help='Use CaffeLoss symbol')
parser.add_argument('--caffe-data', action='store_true',
help='Use Caffe input-data layer only if specified')
parser.add_argument('--data-dir', type=str, default='mnist/',
help='the input data directory')
parser.add_argument('--gpus', type=str,
help='the gpus will be used, e.g "0,1,2,3"')
parser.add_argument('--num-examples', type=int, default=60000,
help='the number of training examples')
parser.add_argument('--batch-size', type=int, default=128,
help='the batch size')
parser.add_argument('--lr', type=float, default=.1,
help='the initial learning rate')
parser.add_argument('--model-prefix', type=str,
help='the prefix of the model to load/save')
parser.add_argument('--save-model-prefix', type=str,
help='the prefix of the model to save')
parser.add_argument('--num-epochs', type=int, default=10,
help='the number of training epochs')
parser.add_argument('--load-epoch', type=int,
help="load the model on an epoch using the model-prefix")
parser.add_argument('--kv-store', type=str, default='local',
help='the kvstore type')
parser.add_argument('--lr-factor', type=float, default=1,
help='times the lr with a factor for every lr-factor-epoch epoch')
parser.add_argument('--lr-factor-epoch', type=float, default=1,
help='the number of epoch to factor the lr, could be .5')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
use_caffe_loss = args.caffe_loss
use_caffe_data = args.caffe_data
data_shape = ()
if args.network == 'mlp':
data_shape = (784, )
net = get_mlp()
elif args.network == 'lenet':
if not use_caffe_data:
data_shape = (1, 28, 28)
net = get_lenet()
else:
net = get_network_from_json_file(args.network)
# train
if use_caffe_loss:
train_model.fit(args, net, get_iterator(data_shape, use_caffe_data), mx.metric.Caffe())
else:
train_model.fit(args, net, get_iterator(data_shape, use_caffe_data))
|
adamwwt/chvac | refs/heads/master | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/mbcharsetprober.py | 2923 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
class MultiByteCharSetProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mDistributionAnalyzer = None
self._mCodingSM = None
self._mLastChar = [0, 0]
def reset(self):
CharSetProber.reset(self)
if self._mCodingSM:
self._mCodingSM.reset()
if self._mDistributionAnalyzer:
self._mDistributionAnalyzer.reset()
self._mLastChar = [0, 0]
def get_charset_name(self):
pass
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == constants.eError:
if constants._debug:
sys.stderr.write(self.get_charset_name()
+ ' prober hit error at byte ' + str(i)
+ '\n')
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if (self._mDistributionAnalyzer.got_enough_data() and
(self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
return self._mDistributionAnalyzer.get_confidence()
|
bhairavmehta95/flashcard-helper-alexa-skill | refs/heads/master | venv/lib/python2.7/site-packages/requests/packages/chardet/mbcharsetprober.py | 2923 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
class MultiByteCharSetProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mDistributionAnalyzer = None
self._mCodingSM = None
self._mLastChar = [0, 0]
def reset(self):
CharSetProber.reset(self)
if self._mCodingSM:
self._mCodingSM.reset()
if self._mDistributionAnalyzer:
self._mDistributionAnalyzer.reset()
self._mLastChar = [0, 0]
def get_charset_name(self):
pass
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == constants.eError:
if constants._debug:
sys.stderr.write(self.get_charset_name()
+ ' prober hit error at byte ' + str(i)
+ '\n')
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if (self._mDistributionAnalyzer.got_enough_data() and
(self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
return self._mDistributionAnalyzer.get_confidence()
|
oomlout/oomlout-OOMP | refs/heads/master | OOMPpart_RESE_0402_X_O471_67.py | 1 | import OOMP
newPart = OOMP.oompItem(9264)
newPart.addTag("oompType", "RESE")
newPart.addTag("oompSize", "0402")
newPart.addTag("oompColor", "X")
newPart.addTag("oompDesc", "O471")
newPart.addTag("oompIndex", "67")
OOMP.parts.append(newPart)
|
Fenixin/Minecraft-Region-Fixer | refs/heads/master | gui/version.py | 1 | '''
Created on 24/06/2014
@author: Alejandro
'''
version_string = "0.0.1"
version_numbers = version_string.split(".")
|
tmpgit/intellij-community | refs/heads/master | python/testData/mover/py950.py | 83 | class A:
def foo(self):
if a: pass
else: pass
a <caret>= 0
if a: pass
else: pass |
msporny/node-gyp | refs/heads/master | gyp/pylib/gyp/common_test.py | 2542 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the common.py file."""
import gyp.common
import unittest
import sys
class TestTopologicallySorted(unittest.TestCase):
def test_Valid(self):
"""Test that sorting works on a valid graph with one possible order."""
graph = {
'a': ['b', 'c'],
'b': [],
'c': ['d'],
'd': ['b'],
}
def GetEdge(node):
return tuple(graph[node])
self.assertEqual(
gyp.common.TopologicallySorted(graph.keys(), GetEdge),
['a', 'c', 'd', 'b'])
def test_Cycle(self):
"""Test that an exception is thrown on a cyclic graph."""
graph = {
'a': ['b'],
'b': ['c'],
'c': ['d'],
'd': ['a'],
}
def GetEdge(node):
return tuple(graph[node])
self.assertRaises(
gyp.common.CycleError, gyp.common.TopologicallySorted,
graph.keys(), GetEdge)
class TestGetFlavor(unittest.TestCase):
"""Test that gyp.common.GetFlavor works as intended"""
original_platform = ''
def setUp(self):
self.original_platform = sys.platform
def tearDown(self):
sys.platform = self.original_platform
def assertFlavor(self, expected, argument, param):
sys.platform = argument
self.assertEqual(expected, gyp.common.GetFlavor(param))
def test_platform_default(self):
self.assertFlavor('freebsd', 'freebsd9' , {})
self.assertFlavor('freebsd', 'freebsd10', {})
self.assertFlavor('openbsd', 'openbsd5' , {})
    self.assertFlavor('solaris', 'sunos5'   , {})
    self.assertFlavor('solaris', 'sunos'    , {})
    self.assertFlavor('linux'  , 'linux2'   , {})
    self.assertFlavor('linux'  , 'linux3'   , {})
def test_param(self):
self.assertFlavor('foobar', 'linux2' , {'flavor': 'foobar'})
if __name__ == '__main__':
unittest.main()
|
smkr/pyclipse | refs/heads/master | plugins/org.python.pydev.jython/Lib/zipfile.py | 8 | "Read and write ZIP files."
import struct, os, time
import binascii
try:
import zlib # We may need its compression method
except ImportError:
zlib = None
__all__ = ["BadZipfile", "error", "ZIP_STORED", "ZIP_DEFLATED", "is_zipfile",
"ZipInfo", "ZipFile", "PyZipFile"]
class BadZipfile(Exception):
pass
error = BadZipfile # The exception raised by this module
# constants for Zip file compression methods
ZIP_STORED = 0
ZIP_DEFLATED = 8
# Other ZIP compression methods not supported
# Here are some struct module formats for reading headers
structEndArchive = "<4s4H2lH" # 9 items, end of archive, 22 bytes
stringEndArchive = "PK\005\006" # magic number for end of archive record
structCentralDir = "<4s4B4H3l5H2l"# 19 items, central directory, 46 bytes
stringCentralDir = "PK\001\002" # magic number for central directory
structFileHeader = "<4s2B4H3l2H" # 12 items, file header record, 30 bytes
stringFileHeader = "PK\003\004" # magic number for file header
# indexes of entries in the central directory structure
_CD_SIGNATURE = 0
_CD_CREATE_VERSION = 1
_CD_CREATE_SYSTEM = 2
_CD_EXTRACT_VERSION = 3
_CD_EXTRACT_SYSTEM = 4 # is this meaningful?
_CD_FLAG_BITS = 5
_CD_COMPRESS_TYPE = 6
_CD_TIME = 7
_CD_DATE = 8
_CD_CRC = 9
_CD_COMPRESSED_SIZE = 10
_CD_UNCOMPRESSED_SIZE = 11
_CD_FILENAME_LENGTH = 12
_CD_EXTRA_FIELD_LENGTH = 13
_CD_COMMENT_LENGTH = 14
_CD_DISK_NUMBER_START = 15
_CD_INTERNAL_FILE_ATTRIBUTES = 16
_CD_EXTERNAL_FILE_ATTRIBUTES = 17
_CD_LOCAL_HEADER_OFFSET = 18
# indexes of entries in the local file header structure
_FH_SIGNATURE = 0
_FH_EXTRACT_VERSION = 1
_FH_EXTRACT_SYSTEM = 2 # is this meaningful?
_FH_GENERAL_PURPOSE_FLAG_BITS = 3
_FH_COMPRESSION_METHOD = 4
_FH_LAST_MOD_TIME = 5
_FH_LAST_MOD_DATE = 6
_FH_CRC = 7
_FH_COMPRESSED_SIZE = 8
_FH_UNCOMPRESSED_SIZE = 9
_FH_FILENAME_LENGTH = 10
_FH_EXTRA_FIELD_LENGTH = 11
# Used to compare file passed to ZipFile
import types
_STRING_TYPES = (types.StringType,)
if hasattr(types, "UnicodeType"):
_STRING_TYPES = _STRING_TYPES + (types.UnicodeType,)
def is_zipfile(filename):
"""Quickly see if file is a ZIP file by checking the magic number.
Will not accept a ZIP archive with an ending comment.
"""
try:
fpin = open(filename, "rb")
fpin.seek(-22, 2) # Seek to end-of-file record
endrec = fpin.read()
fpin.close()
if endrec[0:4] == "PK\005\006" and endrec[-2:] == "\000\000":
return 1 # file has correct magic number
except IOError:
pass
class ZipInfo:
"""Class with attributes describing each file in the ZIP archive."""
def __init__(self, filename="NoName", date_time=(1980,1,1,0,0,0)):
self.orig_filename = filename # Original file name in archive
# Terminate the file name at the first null byte. Null bytes in file
# names are used as tricks by viruses in archives.
null_byte = filename.find(chr(0))
if null_byte >= 0:
filename = filename[0:null_byte]
# This is used to ensure paths in generated ZIP files always use
# forward slashes as the directory separator, as required by the
# ZIP format specification.
if os.sep != "/":
filename = filename.replace(os.sep, "/")
self.filename = filename # Normalized file name
self.date_time = date_time # year, month, day, hour, min, sec
# Standard values:
self.compress_type = ZIP_STORED # Type of compression for the file
self.comment = "" # Comment for each file
self.extra = "" # ZIP extra data
self.create_system = 0 # System which created ZIP archive
self.create_version = 20 # Version which created ZIP archive
self.extract_version = 20 # Version needed to extract archive
self.reserved = 0 # Must be zero
self.flag_bits = 0 # ZIP flag bits
self.volume = 0 # Volume number of file header
self.internal_attr = 0 # Internal attributes
self.external_attr = 0 # External file attributes
# Other attributes are set by class ZipFile:
# header_offset Byte offset to the file header
# file_offset Byte offset to the start of the file data
# CRC CRC-32 of the uncompressed file
# compress_size Size of the compressed file
# file_size Size of the uncompressed file
def FileHeader(self):
"""Return the per-file header as a string."""
dt = self.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
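        # e.g. date_time (1999, 12, 31, 23, 59, 58) packs to
        # dosdate = (19 << 9) | (12 << 5) | 31 = 0x279F and
        # dostime = (23 << 11) | (59 << 5) | 29 = 0xBF7D (2-second resolution)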
if self.flag_bits & 0x08:
# Set these to zero because we write them after the file data
CRC = compress_size = file_size = 0
else:
CRC = self.CRC
compress_size = self.compress_size
file_size = self.file_size
header = struct.pack(structFileHeader, stringFileHeader,
self.extract_version, self.reserved, self.flag_bits,
self.compress_type, dostime, dosdate, CRC,
compress_size, file_size,
len(self.filename), len(self.extra))
return header + self.filename + self.extra
class ZipFile:
""" Class with methods to open, read, write, close, list zip files.
z = ZipFile(file, mode="r", compression=ZIP_STORED)
file: Either the path to the file, or a file-like object.
If it is a path, the file will be opened and closed by ZipFile.
mode: The mode can be either read "r", write "w" or append "a".
compression: ZIP_STORED (no compression) or ZIP_DEFLATED (requires zlib).
"""
fp = None # Set here since __del__ checks it
def __init__(self, file, mode="r", compression=ZIP_STORED):
"""Open the ZIP file with mode read "r", write "w" or append "a"."""
if compression == ZIP_STORED:
pass
elif compression == ZIP_DEFLATED:
if not zlib:
raise RuntimeError,\
"Compression requires the (missing) zlib module"
else:
raise RuntimeError, "That compression method is not supported"
self.debug = 0 # Level of printing: 0 through 3
self.NameToInfo = {} # Find file info given name
self.filelist = [] # List of ZipInfo instances for archive
self.compression = compression # Method of compression
self.mode = key = mode[0]
# Check if we were passed a file-like object
if type(file) in _STRING_TYPES:
self._filePassed = 0
self.filename = file
modeDict = {'r' : 'rb', 'w': 'wb', 'a' : 'r+b'}
self.fp = open(file, modeDict[mode])
else:
self._filePassed = 1
self.fp = file
self.filename = getattr(file, 'name', None)
if key == 'r':
self._GetContents()
elif key == 'w':
pass
elif key == 'a':
fp = self.fp
fp.seek(-22, 2) # Seek to end-of-file record
endrec = fp.read()
if endrec[0:4] == stringEndArchive and \
endrec[-2:] == "\000\000":
self._GetContents() # file is a zip file
# seek to start of directory and overwrite
fp.seek(self.start_dir, 0)
else: # file is not a zip file, just append
fp.seek(0, 2)
else:
if not self._filePassed:
self.fp.close()
self.fp = None
raise RuntimeError, 'Mode must be "r", "w" or "a"'
def _GetContents(self):
"""Read the directory, making sure we close the file if the format
is bad."""
try:
self._RealGetContents()
except BadZipfile:
if not self._filePassed:
self.fp.close()
self.fp = None
raise
def _RealGetContents(self):
"""Read in the table of contents for the ZIP file."""
fp = self.fp
fp.seek(-22, 2) # Start of end-of-archive record
filesize = fp.tell() + 22 # Get file size
endrec = fp.read(22) # Archive must not end with a comment!
if endrec[0:4] != stringEndArchive or endrec[-2:] != "\000\000":
raise BadZipfile, "File is not a zip file, or ends with a comment"
endrec = struct.unpack(structEndArchive, endrec)
if self.debug > 1:
print endrec
size_cd = endrec[5] # bytes in central directory
offset_cd = endrec[6] # offset of central directory
x = filesize - 22 - size_cd
# "concat" is zero, unless zip was concatenated to another file
concat = x - offset_cd
if self.debug > 2:
print "given, inferred, offset", offset_cd, x, concat
# self.start_dir: Position of start of central directory
self.start_dir = offset_cd + concat
fp.seek(self.start_dir, 0)
total = 0
while total < size_cd:
centdir = fp.read(46)
total = total + 46
if centdir[0:4] != stringCentralDir:
raise BadZipfile, "Bad magic number for central directory"
centdir = struct.unpack(structCentralDir, centdir)
if self.debug > 2:
print centdir
filename = fp.read(centdir[_CD_FILENAME_LENGTH])
# Create ZipInfo instance to store file information
x = ZipInfo(filename)
x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH])
x.comment = fp.read(centdir[_CD_COMMENT_LENGTH])
total = (total + centdir[_CD_FILENAME_LENGTH]
+ centdir[_CD_EXTRA_FIELD_LENGTH]
+ centdir[_CD_COMMENT_LENGTH])
x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET] + concat
# file_offset must be computed below...
(x.create_version, x.create_system, x.extract_version, x.reserved,
x.flag_bits, x.compress_type, t, d,
x.CRC, x.compress_size, x.file_size) = centdir[1:12]
x.volume, x.internal_attr, x.external_attr = centdir[15:18]
# Convert date/time code to (year, month, day, hour, min, sec)
x.date_time = ( (d>>9)+1980, (d>>5)&0xF, d&0x1F,
t>>11, (t>>5)&0x3F, (t&0x1F) * 2 )
self.filelist.append(x)
self.NameToInfo[x.filename] = x
if self.debug > 2:
print "total", total
for data in self.filelist:
fp.seek(data.header_offset, 0)
fheader = fp.read(30)
if fheader[0:4] != stringFileHeader:
raise BadZipfile, "Bad magic number for file header"
fheader = struct.unpack(structFileHeader, fheader)
# file_offset is computed here, since the extra field for
# the central directory and for the local file header
# refer to different fields, and they can have different
# lengths
data.file_offset = (data.header_offset + 30
+ fheader[_FH_FILENAME_LENGTH]
+ fheader[_FH_EXTRA_FIELD_LENGTH])
fname = fp.read(fheader[_FH_FILENAME_LENGTH])
if fname != data.orig_filename:
raise RuntimeError, \
'File name in directory "%s" and header "%s" differ.' % (
data.orig_filename, fname)
def namelist(self):
"""Return a list of file names in the archive."""
l = []
for data in self.filelist:
l.append(data.filename)
return l
def infolist(self):
"""Return a list of class ZipInfo instances for files in the
archive."""
return self.filelist
def printdir(self):
"""Print a table of contents for the zip file."""
print "%-46s %19s %12s" % ("File Name", "Modified ", "Size")
for zinfo in self.filelist:
date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time
print "%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size)
def testzip(self):
"""Read all the files and check the CRC."""
for zinfo in self.filelist:
try:
self.read(zinfo.filename) # Check CRC-32
except:
return zinfo.filename
def getinfo(self, name):
"""Return the instance of ZipInfo given 'name'."""
return self.NameToInfo[name]
def read(self, name):
"""Return file bytes (as a string) for name."""
if self.mode not in ("r", "a"):
raise RuntimeError, 'read() requires mode "r" or "a"'
if not self.fp:
raise RuntimeError, \
"Attempt to read ZIP archive that was already closed"
zinfo = self.getinfo(name)
filepos = self.fp.tell()
self.fp.seek(zinfo.file_offset, 0)
bytes = self.fp.read(zinfo.compress_size)
self.fp.seek(filepos, 0)
if zinfo.compress_type == ZIP_STORED:
pass
elif zinfo.compress_type == ZIP_DEFLATED:
if not zlib:
raise RuntimeError, \
"De-compression requires the (missing) zlib module"
# zlib compress/decompress code by Jeremy Hylton of CNRI
dc = zlib.decompressobj(-15)
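            # (wbits=-15 selects a raw deflate stream with a 32K window,
            #  since zip members are stored without the zlib header)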
bytes = dc.decompress(bytes)
# need to feed in unused pad byte so that zlib won't choke
ex = dc.decompress('Z') + dc.flush()
if ex:
bytes = bytes + ex
else:
raise BadZipfile, \
"Unsupported compression method %d for file %s" % \
(zinfo.compress_type, name)
crc = binascii.crc32(bytes)
if crc != zinfo.CRC:
raise BadZipfile, "Bad CRC-32 for file %s" % name
return bytes
def _writecheck(self, zinfo):
"""Check for errors before writing a file to the archive."""
if self.NameToInfo.has_key(zinfo.filename):
if self.debug: # Warning for duplicate names
print "Duplicate name:", zinfo.filename
if self.mode not in ("w", "a"):
raise RuntimeError, 'write() requires mode "w" or "a"'
if not self.fp:
raise RuntimeError, \
"Attempt to write ZIP archive that was already closed"
if zinfo.compress_type == ZIP_DEFLATED and not zlib:
raise RuntimeError, \
"Compression requires the (missing) zlib module"
if zinfo.compress_type not in (ZIP_STORED, ZIP_DEFLATED):
raise RuntimeError, \
"That compression method is not supported"
def write(self, filename, arcname=None, compress_type=None):
"""Put the bytes from filename into the archive under the name
arcname."""
st = os.stat(filename)
mtime = time.localtime(st[8])
date_time = mtime[0:6]
# Create ZipInfo instance to store file information
if arcname is None:
zinfo = ZipInfo(filename, date_time)
else:
zinfo = ZipInfo(arcname, date_time)
zinfo.external_attr = st[0] << 16 # Unix attributes
if compress_type is None:
zinfo.compress_type = self.compression
else:
zinfo.compress_type = compress_type
self._writecheck(zinfo)
fp = open(filename, "rb")
zinfo.flag_bits = 0x00
zinfo.header_offset = self.fp.tell() # Start of header bytes
# Must overwrite CRC and sizes with correct data later
zinfo.CRC = CRC = 0
zinfo.compress_size = compress_size = 0
zinfo.file_size = file_size = 0
self.fp.write(zinfo.FileHeader())
zinfo.file_offset = self.fp.tell() # Start of file bytes
if zinfo.compress_type == ZIP_DEFLATED:
cmpr = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
zlib.DEFLATED, -15)
else:
cmpr = None
while 1:
buf = fp.read(1024 * 8)
if not buf:
break
file_size = file_size + len(buf)
CRC = binascii.crc32(buf, CRC)
if cmpr:
buf = cmpr.compress(buf)
compress_size = compress_size + len(buf)
self.fp.write(buf)
fp.close()
if cmpr:
buf = cmpr.flush()
compress_size = compress_size + len(buf)
self.fp.write(buf)
zinfo.compress_size = compress_size
else:
zinfo.compress_size = file_size
zinfo.CRC = CRC
zinfo.file_size = file_size
# Seek backwards and write CRC and file sizes
position = self.fp.tell() # Preserve current position in file
self.fp.seek(zinfo.header_offset + 14, 0)
self.fp.write(struct.pack("<lll", zinfo.CRC, zinfo.compress_size,
zinfo.file_size))
self.fp.seek(position, 0)
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
def writestr(self, zinfo, bytes):
"""Write a file into the archive. The contents is the string
'bytes'."""
self._writecheck(zinfo)
zinfo.file_size = len(bytes) # Uncompressed size
zinfo.CRC = binascii.crc32(bytes) # CRC-32 checksum
if zinfo.compress_type == ZIP_DEFLATED:
co = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
zlib.DEFLATED, -15)
bytes = co.compress(bytes) + co.flush()
zinfo.compress_size = len(bytes) # Compressed size
else:
zinfo.compress_size = zinfo.file_size
zinfo.header_offset = self.fp.tell() # Start of header bytes
self.fp.write(zinfo.FileHeader())
zinfo.file_offset = self.fp.tell() # Start of file bytes
self.fp.write(bytes)
if zinfo.flag_bits & 0x08:
# Write CRC and file sizes after the file data
self.fp.write(struct.pack("<lll", zinfo.CRC, zinfo.compress_size,
zinfo.file_size))
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
def __del__(self):
"""Call the "close()" method in case the user forgot."""
self.close()
def close(self):
"""Close the file, and for mode "w" and "a" write the ending
records."""
if self.fp is None:
return
if self.mode in ("w", "a"): # write ending records
count = 0
pos1 = self.fp.tell()
for zinfo in self.filelist: # write central directory
count = count + 1
dt = zinfo.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
centdir = struct.pack(structCentralDir,
stringCentralDir, zinfo.create_version,
zinfo.create_system, zinfo.extract_version, zinfo.reserved,
zinfo.flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, zinfo.compress_size, zinfo.file_size,
len(zinfo.filename), len(zinfo.extra), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
zinfo.header_offset)
self.fp.write(centdir)
self.fp.write(zinfo.filename)
self.fp.write(zinfo.extra)
self.fp.write(zinfo.comment)
pos2 = self.fp.tell()
# Write end-of-zip-archive record
endrec = struct.pack(structEndArchive, stringEndArchive,
0, 0, count, count, pos2 - pos1, pos1, 0)
self.fp.write(endrec)
self.fp.flush()
if not self._filePassed:
self.fp.close()
self.fp = None
class PyZipFile(ZipFile):
"""Class to create ZIP archives with Python library files and packages."""
def writepy(self, pathname, basename = ""):
"""Add all files from "pathname" to the ZIP archive.
If pathname is a package directory, search the directory and
all package subdirectories recursively for all *.py and enter
the modules into the archive. If pathname is a plain
directory, listdir *.py and enter all modules. Else, pathname
must be a Python *.py file and the module will be put into the
archive. Added modules are always module.pyo or module.pyc.
This method will compile the module.py into module.pyc if
necessary.
"""
dir, name = os.path.split(pathname)
if os.path.isdir(pathname):
initname = os.path.join(pathname, "__init__.py")
if os.path.isfile(initname):
# This is a package directory, add it
if basename:
basename = "%s/%s" % (basename, name)
else:
basename = name
if self.debug:
print "Adding package in", pathname, "as", basename
fname, arcname = self._get_codename(initname[0:-3], basename)
if self.debug:
print "Adding", arcname
self.write(fname, arcname)
dirlist = os.listdir(pathname)
dirlist.remove("__init__.py")
# Add all *.py files and package subdirectories
for filename in dirlist:
path = os.path.join(pathname, filename)
root, ext = os.path.splitext(filename)
if os.path.isdir(path):
if os.path.isfile(os.path.join(path, "__init__.py")):
# This is a package directory, add it
self.writepy(path, basename) # Recursive call
elif ext == ".py":
fname, arcname = self._get_codename(path[0:-3],
basename)
if self.debug:
print "Adding", arcname
self.write(fname, arcname)
else:
# This is NOT a package directory, add its files at top level
if self.debug:
print "Adding files from directory", pathname
for filename in os.listdir(pathname):
path = os.path.join(pathname, filename)
root, ext = os.path.splitext(filename)
if ext == ".py":
fname, arcname = self._get_codename(path[0:-3],
basename)
if self.debug:
print "Adding", arcname
self.write(fname, arcname)
else:
if pathname[-3:] != ".py":
raise RuntimeError, \
'Files added with writepy() must end with ".py"'
fname, arcname = self._get_codename(pathname[0:-3], basename)
if self.debug:
print "Adding file", arcname
self.write(fname, arcname)
def _get_codename(self, pathname, basename):
"""Return (filename, archivename) for the path.
Given a module name path, return the correct file path and
archive name, compiling if necessary. For example, given
/python/lib/string, return (/python/lib/string.pyc, string).
"""
file_py = pathname + ".py"
file_pyc = pathname + ".pyc"
file_pyo = pathname + ".pyo"
if os.path.isfile(file_pyo) and \
os.stat(file_pyo)[8] >= os.stat(file_py)[8]:
fname = file_pyo # Use .pyo file
elif not os.path.isfile(file_pyc) or \
os.stat(file_pyc)[8] < os.stat(file_py)[8]:
import py_compile
if self.debug:
print "Compiling", file_py
py_compile.compile(file_py, file_pyc)
fname = file_pyc
else:
fname = file_pyc
archivename = os.path.split(fname)[1]
if basename:
archivename = "%s/%s" % (basename, archivename)
return (fname, archivename)
|
PyAndy/Py3NES | refs/heads/master | addressing.py | 1 | from typing import Optional
import numpy as np
import cpu as c
class Addressing(object):
data_length = 0
@classmethod
def get_instruction_length(cls):
return cls.data_length + 1
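    # (the +1 covers the opcode byte itself, e.g. an absolute-addressed
    #  instruction with two operand bytes is 3 bytes long in total)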
@classmethod
def get_offset(cls, cpu):
return 0
class XRegOffset(object):
@classmethod
def get_offset(cls, cpu):
return cpu.x_reg
class YRegOffset(object):
@classmethod
def get_offset(cls, cpu):
return cpu.y_reg
class ImpliedAddressing(Addressing):
"""
instructions that have data passed
example: CLD
"""
data_length = 0
class AccumulatorAddressing(Addressing):
"""
get value from accumulator
"""
data_length = 0
@classmethod
def get_data(cls, cpu, memory_address, data_bytes):
return cpu.a_reg
class ImmediateReadAddressing(Addressing):
"""
read a value from the instruction data
example: STA #7
example: 8D 07
"""
data_length = 1
@classmethod
def get_data(cls, cpu, memory_address, data_bytes):
return data_bytes[0]
class AbsoluteAddressing(Addressing):
"""
looks up an absolute memory address and returns the value
example: STA $12 34
example: 8D 34 12
"""
data_length = 2
@classmethod
def get_address(cls, cpu, data_bytes: bytes) -> Optional[int]:
return np.uint16(int.from_bytes(data_bytes, byteorder='little') + cls.get_offset(cpu))
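    # e.g. for "8D 34 12" the data_bytes are b'\x34\x12', which little-endian
    # decoding turns into address 0x1234 before any register offset is added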
class AbsoluteAddressingWithX(XRegOffset, AbsoluteAddressing):
"""
adds the x reg offset to an absolute memory location
"""
class AbsoluteAddressingWithY(YRegOffset, AbsoluteAddressing):
"""
adds the y reg offset to an absolute memory location
"""
class ZeroPageAddressing(Addressing):
"""
look up an absolute memory address in the first 256 bytes
example: STA $12
memory_address: $12
Note: can overflow
"""
data_length = 1
@classmethod
def get_address(cls, cpu, data_bytes: bytes) -> Optional[int]:
address = np.uint8(int.from_bytes(data_bytes, byteorder='little') + cls.get_offset(cpu))
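        # the np.uint8 cast wraps on overflow: e.g. base 0xF0 plus an x_reg
        # offset of 0x20 resolves to 0x10, matching 6502 zero-page behaviour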
return address
class ZeroPageAddressingWithX(XRegOffset, ZeroPageAddressing):
"""
adds the x reg offset to an absolute memory address in the first 256 bytes
"""
class ZeroPageAddressingWithY(YRegOffset, ZeroPageAddressing):
"""
adds the y reg offset to an absolute memory address in the first 256 bytes
"""
class RelativeAddressing(Addressing):
"""
offset from current PC, can only jump 128 bytes in either direction
"""
data_length = 1
@classmethod
def get_address(cls, cpu, data_bytes: bytes) -> Optional[int]:
# get the program counter
current_address = cpu.pc_reg
# offset by value in instruction, signed 8 bit value
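        # (e.g. an operand byte of 0xFB reads as np.int8(-5), branching five
        #  bytes back from the current program counter)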
return current_address + np.int8(int.from_bytes(data_bytes, byteorder='little'))
class IndirectBase(Addressing):
@classmethod
def get_address(cls, cpu: 'c.CPU', data_bytes: bytes):
# look up the bytes at [original_address, original_address + 1]
lsb_location = np.uint16(super().get_address(cpu, data_bytes))
msb_location = np.uint16(lsb_location + 1)
# wrap around on page boundaries
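        # (e.g. lsb_location 0x02FF gives msb_location 0x0300; the check below
        #  pulls it back to 0x0200, mirroring the 6502 indirect-JMP quirk)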
if msb_location % 0x100 == 0:
msb_location = np.uint16(lsb_location - 0xFF)
lsb = cpu.get_memory(lsb_location)
msb = cpu.get_memory(msb_location)
return np.uint16(int.from_bytes(bytes([lsb, msb]), byteorder='little'))
class IndirectAddressing(IndirectBase, AbsoluteAddressing):
"""
indirect address
"""
class IndirectAddressingWithX(IndirectBase, ZeroPageAddressingWithX):
"""
adds the x reg before indirection
"""
class IndirectAddressingWithY(IndirectBase, ZeroPageAddressing):
"""
adds the y reg after indirection
"""
@classmethod
def get_address(cls, cpu: 'c.CPU', data_bytes: bytes):
return np.uint16(super().get_address(cpu, data_bytes) + cpu.y_reg)
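# A minimal usage sketch of the mixin composition above, assuming only a
# stub object exposing `x_reg` (the real CPU lives in cpu.py); `_StubCPU`
# and `_sketch_absolute_with_x` are hypothetical names for illustration.
def _sketch_absolute_with_x():
    class _StubCPU:
        x_reg = 0x05
    addr = AbsoluteAddressingWithX.get_address(_StubCPU(), bytes([0x34, 0x12]))
    assert addr == 0x1239  # little-endian 0x1234 plus the X offset 0x05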
|
fgirault/smeuhsocial | refs/heads/master | apps/smeuhoverride/tests.py | 1 | # -*- encoding: utf-8 -*-
from django.test import TestCase
from django.contrib.auth.models import User
class BaseTestCase(TestCase):
def setUp(self):
self.me = self.create_user('bob')
self.login(self.me.username)
self.her = self.create_user('alice')
def create_user(self, username='bob'):
return User.objects.create_user(username, password='secret')
def login(self, username):
self.client.login(username=username, password='secret')
from os.path import join, dirname
class BaseImageTest(BaseTestCase):
    testfile = join(dirname(dirname(dirname(__file__))), 'tests', '1px.gif')
class TestHomePage(BaseTestCase):
def test_home_page(self):
resp = self.client.get('/')
self.assertContains(resp, 'Homepage')
|
JoannaKielas/wedding-gallery | refs/heads/master | photologue/models.py | 1 | import os
import random
from datetime import datetime
from inspect import isclass
import logging
from io import BytesIO
from importlib import import_module
import exifread
import unicodedata
from django.utils.timezone import now
from django.db import models
from django.db.models.signals import post_save
from django.conf import settings
from django.core.files.base import ContentFile
from django.core.files.storage import default_storage
from django.core.urlresolvers import reverse
from django.core.exceptions import ValidationError
from django.template.defaultfilters import slugify
from django.utils.encoding import force_text, smart_str, filepath_to_uri
from django.utils.functional import curry
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import python_2_unicode_compatible
from django.core.validators import RegexValidator
from django.contrib.sites.models import Site
# Required PIL classes may or may not be available from the root namespace
# depending on the installation method used.
try:
import Image
import ImageFile
import ImageFilter
import ImageEnhance
except ImportError:
try:
from PIL import Image
from PIL import ImageFile
from PIL import ImageFilter
from PIL import ImageEnhance
except ImportError:
raise ImportError(
            'Photologue was unable to import the Python Imaging Library. Please confirm it\'s installed and available '
'on your current Python path.')
from sortedm2m.fields import SortedManyToManyField
from .utils.reflection import add_reflection
from .utils.watermark import apply_watermark
from .managers import GalleryQuerySet, PhotoQuerySet
logger = logging.getLogger('photologue.models')
# Default limit for gallery.latest
LATEST_LIMIT = getattr(settings, 'PHOTOLOGUE_GALLERY_LATEST_LIMIT', None)
# Number of random images from the gallery to display.
SAMPLE_SIZE = getattr(settings, 'PHOTOLOGUE_GALLERY_SAMPLE_SIZE', 5)
# max_length setting for the ImageModel ImageField
IMAGE_FIELD_MAX_LENGTH = getattr(settings, 'PHOTOLOGUE_IMAGE_FIELD_MAX_LENGTH', 100)
# Path to sample image
SAMPLE_IMAGE_PATH = getattr(settings, 'PHOTOLOGUE_SAMPLE_IMAGE_PATH', os.path.join(
os.path.dirname(__file__), 'res', 'sample.jpg'))
# Modify image file buffer size.
ImageFile.MAXBLOCK = getattr(settings, 'PHOTOLOGUE_MAXBLOCK', 256 * 2 ** 10)
# Photologue image path relative to media root
PHOTOLOGUE_DIR = getattr(settings, 'PHOTOLOGUE_DIR', 'photologue')
# Look for user function to define file paths
PHOTOLOGUE_PATH = getattr(settings, 'PHOTOLOGUE_PATH', None)
if PHOTOLOGUE_PATH is not None:
if callable(PHOTOLOGUE_PATH):
get_storage_path = PHOTOLOGUE_PATH
else:
parts = PHOTOLOGUE_PATH.split('.')
module_name = '.'.join(parts[:-1])
module = import_module(module_name)
get_storage_path = getattr(module, parts[-1])
else:
def get_storage_path(instance, filename):
fn = unicodedata.normalize('NFKD', force_text(filename)).encode('ascii', 'ignore').decode('ascii')
return os.path.join(PHOTOLOGUE_DIR, 'photos', fn)
# Support CACHEDIR.TAG spec for backups for ignoring cache dir.
# See http://www.brynosaurus.com/cachedir/spec.html
PHOTOLOGUE_CACHEDIRTAG = os.path.join(PHOTOLOGUE_DIR, "photos", "cache", "CACHEDIR.TAG")
if not default_storage.exists(PHOTOLOGUE_CACHEDIRTAG):
default_storage.save(PHOTOLOGUE_CACHEDIRTAG, ContentFile(
"Signature: 8a477f597d28d172789f06886806bc55"))
# Exif Orientation values
# Value 0thRow 0thColumn
# 1 top left
# 2 top right
# 3 bottom right
# 4 bottom left
# 5 left top
# 6 right top
# 7 right bottom
# 8 left bottom
# Image Orientations (according to EXIF information) that need to be
# transposed, and the appropriate transpose action
IMAGE_EXIF_ORIENTATION_MAP = {
2: Image.FLIP_LEFT_RIGHT,
3: Image.ROTATE_180,
6: Image.ROTATE_270,
8: Image.ROTATE_90,
}
# Quality options for JPEG images
JPEG_QUALITY_CHOICES = (
(30, _('Very Low')),
(40, _('Low')),
(50, _('Medium-Low')),
(60, _('Medium')),
(70, _('Medium-High')),
(80, _('High')),
(90, _('Very High')),
)
# choices for new crop_anchor field in Photo
CROP_ANCHOR_CHOICES = (
('top', _('Top')),
('right', _('Right')),
('bottom', _('Bottom')),
('left', _('Left')),
('center', _('Center (Default)')),
)
IMAGE_TRANSPOSE_CHOICES = (
('FLIP_LEFT_RIGHT', _('Flip left to right')),
('FLIP_TOP_BOTTOM', _('Flip top to bottom')),
('ROTATE_90', _('Rotate 90 degrees counter-clockwise')),
('ROTATE_270', _('Rotate 90 degrees clockwise')),
('ROTATE_180', _('Rotate 180 degrees')),
)
WATERMARK_STYLE_CHOICES = (
('tile', _('Tile')),
('scale', _('Scale')),
)
# Prepare a list of image filters
filter_names = []
for n in dir(ImageFilter):
klass = getattr(ImageFilter, n)
if isclass(klass) and issubclass(klass, ImageFilter.BuiltinFilter) and \
hasattr(klass, 'name'):
filter_names.append(klass.__name__)
IMAGE_FILTERS_HELP_TEXT = _('Chain multiple filters using the following pattern "FILTER_ONE->FILTER_TWO->FILTER_THREE"'
'. Image filters will be applied in order. The following filters are available: %s.'
% (', '.join(filter_names)))
size_method_map = {}
class TagField(models.CharField):
"""Tags have been removed from Photologue, but the migrations still refer to them so this
Tagfield definition is left here.
"""
def __init__(self, **kwargs):
default_kwargs = {'max_length': 255, 'blank': True}
default_kwargs.update(kwargs)
super(TagField, self).__init__(**default_kwargs)
def get_internal_type(self):
return 'CharField'
@python_2_unicode_compatible
class Gallery(models.Model):
date_added = models.DateTimeField(_('date published'),
default=now)
title = models.CharField(_('title'),
max_length=250,
unique=True)
slug = models.SlugField(_('title slug'),
unique=True,
max_length=250,
help_text=_('A "slug" is a unique URL-friendly title for an object.'))
description = models.TextField(_('description'),
blank=True)
is_public = models.BooleanField(_('is public'),
default=True,
help_text=_('Public galleries will be displayed '
'in the default views.'))
photos = SortedManyToManyField('photologue.Photo',
related_name='galleries',
verbose_name=_('photos'),
blank=True)
sites = models.ManyToManyField(Site, verbose_name=_(u'sites'),
blank=True)
objects = GalleryQuerySet.as_manager()
class Meta:
ordering = ['-date_added']
get_latest_by = 'date_added'
verbose_name = _('gallery')
verbose_name_plural = _('galleries')
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('photologue:pl-gallery', args=[self.slug])
def latest(self, limit=LATEST_LIMIT, public=True):
if not limit:
limit = self.photo_count()
if public:
return self.public()[:limit]
else:
return self.photos.filter(sites__id=settings.SITE_ID)[:limit]
def sample(self, count=None, public=True):
"""Return a sample of photos, ordered at random.
If the 'count' is not specified, it will return a number of photos
    limited by the PHOTOLOGUE_GALLERY_SAMPLE_SIZE setting.
"""
if not count:
count = SAMPLE_SIZE
if count > self.photo_count():
count = self.photo_count()
if public:
photo_set = self.public()
else:
photo_set = self.photos.filter(sites__id=settings.SITE_ID)
return random.sample(set(photo_set), count)
def photo_count(self, public=True):
"""Return a count of all the photos in this gallery."""
if public:
return self.public().count()
else:
return self.photos.filter(sites__id=settings.SITE_ID).count()
photo_count.short_description = _('count')
def public(self):
"""Return a queryset of all the public photos in this gallery."""
return self.photos.is_public().filter(sites__id=settings.SITE_ID)
def orphaned_photos(self):
"""
Return all photos that belong to this gallery but don't share the
gallery's site.
"""
return self.photos.filter(is_public=True)\
.exclude(sites__id__in=self.sites.all())
class ImageModel(models.Model):
image = models.ImageField(_('image'),
max_length=IMAGE_FIELD_MAX_LENGTH,
upload_to=get_storage_path)
date_taken = models.DateTimeField(_('date taken'),
null=True,
blank=True,
help_text=_('Date image was taken; is obtained from the image EXIF data.'))
view_count = models.PositiveIntegerField(_('view count'),
default=0,
editable=False)
crop_from = models.CharField(_('crop from'),
blank=True,
max_length=10,
default='center',
choices=CROP_ANCHOR_CHOICES)
effect = models.ForeignKey('photologue.PhotoEffect',
null=True,
blank=True,
related_name="%(class)s_related",
verbose_name=_('effect'))
class Meta:
abstract = True
def EXIF(self, file=None):
try:
if file:
tags = exifread.process_file(file)
else:
with self.image.storage.open(self.image.name, 'rb') as file:
tags = exifread.process_file(file, details=False)
return tags
        except Exception:
return {}
def admin_thumbnail(self):
func = getattr(self, 'get_admin_thumbnail_url', None)
if func is None:
return _('An "admin_thumbnail" photo size has not been defined.')
else:
if hasattr(self, 'get_absolute_url'):
return u'<a href="%s"><img src="%s"></a>' % \
(self.get_absolute_url(), func())
else:
return u'<a href="%s"><img src="%s"></a>' % \
(self.image.url, func())
admin_thumbnail.short_description = _('Thumbnail')
admin_thumbnail.allow_tags = True
def cache_path(self):
return os.path.join(os.path.dirname(self.image.name), "cache")
def cache_url(self):
return '/'.join([os.path.dirname(self.image.url), "cache"])
def image_filename(self):
return os.path.basename(force_text(self.image.name))
def _get_filename_for_size(self, size):
size = getattr(size, 'name', size)
base, ext = os.path.splitext(self.image_filename())
return ''.join([base, '_', size, ext])
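    # For example, an image named "photo.jpg" rendered at a size named
    # "thumbnail" yields the cached filename "photo_thumbnail.jpg".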
def _get_SIZE_photosize(self, size):
return PhotoSizeCache().sizes.get(size)
def _get_SIZE_size(self, size):
photosize = PhotoSizeCache().sizes.get(size)
if not self.size_exists(photosize):
self.create_size(photosize)
return Image.open(self.image.storage.open(
self._get_SIZE_filename(size))).size
def _get_SIZE_url(self, size):
photosize = PhotoSizeCache().sizes.get(size)
if not self.size_exists(photosize):
self.create_size(photosize)
if photosize.increment_count:
self.increment_count()
return '/'.join([
self.cache_url(),
filepath_to_uri(self._get_filename_for_size(photosize.name))])
def _get_SIZE_filename(self, size):
photosize = PhotoSizeCache().sizes.get(size)
return smart_str(os.path.join(self.cache_path(),
self._get_filename_for_size(photosize.name)))
def increment_count(self):
self.view_count += 1
models.Model.save(self)
def __getattr__(self, name):
global size_method_map
if not size_method_map:
init_size_method_map()
di = size_method_map.get(name, None)
if di is not None:
result = curry(getattr(self, di['base_name']), di['size'])
setattr(self, name, result)
return result
else:
raise AttributeError
def size_exists(self, photosize):
func = getattr(self, "get_%s_filename" % photosize.name, None)
if func is not None:
if self.image.storage.exists(func()):
return True
return False
def resize_image(self, im, photosize):
cur_width, cur_height = im.size
new_width, new_height = photosize.size
if photosize.crop:
ratio = max(float(new_width) / cur_width, float(new_height) / cur_height)
x = (cur_width * ratio)
y = (cur_height * ratio)
xd = abs(new_width - x)
yd = abs(new_height - y)
x_diff = int(xd / 2)
y_diff = int(yd / 2)
if self.crop_from == 'top':
box = (int(x_diff), 0, int(x_diff + new_width), new_height)
elif self.crop_from == 'left':
box = (0, int(y_diff), new_width, int(y_diff + new_height))
elif self.crop_from == 'bottom':
# y - yd = new_height
box = (int(x_diff), int(yd), int(x_diff + new_width), int(y))
elif self.crop_from == 'right':
# x - xd = new_width
box = (int(xd), int(y_diff), int(x), int(y_diff + new_height))
else:
box = (int(x_diff), int(y_diff), int(x_diff + new_width), int(y_diff + new_height))
im = im.resize((int(x), int(y)), Image.ANTIALIAS).crop(box)
else:
            if new_width != 0 and new_height != 0:
ratio = min(float(new_width) / cur_width,
float(new_height) / cur_height)
else:
if new_width == 0:
ratio = float(new_height) / cur_height
else:
ratio = float(new_width) / cur_width
new_dimensions = (int(round(cur_width * ratio)),
int(round(cur_height * ratio)))
if new_dimensions[0] > cur_width or \
new_dimensions[1] > cur_height:
if not photosize.upscale:
return im
im = im.resize(new_dimensions, Image.ANTIALIAS)
return im
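    # Worked example of the crop branch above: filling a 100x100 size from
    # an 800x600 source gives ratio = max(100/800, 100/600) = 1/6, an
    # intermediate resize of roughly 133x100, and for crop_from == 'center'
    # a crop box of (16, 0, 116, 100).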
def create_size(self, photosize):
if self.size_exists(photosize):
return
try:
im = Image.open(self.image.storage.open(self.image.name))
except IOError:
return
# Save the original format
im_format = im.format
# Apply effect if found
if self.effect is not None:
im = self.effect.pre_process(im)
elif photosize.effect is not None:
im = photosize.effect.pre_process(im)
# Rotate if found & necessary
if 'Image Orientation' in self.EXIF() and \
self.EXIF().get('Image Orientation').values[0] in IMAGE_EXIF_ORIENTATION_MAP:
im = im.transpose(
IMAGE_EXIF_ORIENTATION_MAP[self.EXIF().get('Image Orientation').values[0]])
# Resize/crop image
if im.size != photosize.size and photosize.size != (0, 0):
im = self.resize_image(im, photosize)
# Apply watermark if found
if photosize.watermark is not None:
im = photosize.watermark.post_process(im)
# Apply effect if found
if self.effect is not None:
im = self.effect.post_process(im)
elif photosize.effect is not None:
im = photosize.effect.post_process(im)
# Save file
im_filename = getattr(self, "get_%s_filename" % photosize.name)()
try:
buffer = BytesIO()
if im_format != 'JPEG':
im.save(buffer, im_format)
else:
im.save(buffer, 'JPEG', quality=int(photosize.quality),
optimize=True)
buffer_contents = ContentFile(buffer.getvalue())
self.image.storage.save(im_filename, buffer_contents)
except IOError as e:
if self.image.storage.exists(im_filename):
self.image.storage.delete(im_filename)
raise e
def remove_size(self, photosize, remove_dirs=True):
if not self.size_exists(photosize):
return
filename = getattr(self, "get_%s_filename" % photosize.name)()
if self.image.storage.exists(filename):
self.image.storage.delete(filename)
def clear_cache(self):
cache = PhotoSizeCache()
for photosize in cache.sizes.values():
self.remove_size(photosize, False)
def pre_cache(self):
cache = PhotoSizeCache()
for photosize in cache.sizes.values():
if photosize.pre_cache:
self.create_size(photosize)
def __init__(self, *args, **kwargs):
super(ImageModel, self).__init__(*args, **kwargs)
self._old_image = self.image
def save(self, *args, **kwargs):
image_has_changed = False
if self._get_pk_val() and (self._old_image != self.image):
image_has_changed = True
# If we have changed the image, we need to clear from the cache all instances of the old
# image; clear_cache() works on the current (new) image, and in turn calls several other methods.
# Changing them all to act on the old image was a lot of changes, so instead we temporarily swap old
# and new images.
new_image = self.image
self.image = self._old_image
self.clear_cache()
self.image = new_image # Back to the new image.
self._old_image.storage.delete(self._old_image.name) # Delete (old) base image.
if self.date_taken is None or image_has_changed:
# Attempt to get the date the photo was taken from the EXIF data.
try:
exif_date = self.EXIF(self.image.file).get('EXIF DateTimeOriginal', None)
if exif_date is not None:
d, t = exif_date.values.split()
year, month, day = d.split(':')
hour, minute, second = t.split(':')
self.date_taken = datetime(int(year), int(month), int(day),
int(hour), int(minute), int(second))
            except Exception:
logger.error('Failed to read EXIF DateTimeOriginal', exc_info=True)
super(ImageModel, self).save(*args, **kwargs)
self.pre_cache()
def delete(self):
assert self._get_pk_val() is not None, \
"%s object can't be deleted because its %s attribute is set to None." % \
(self._meta.object_name, self._meta.pk.attname)
self.clear_cache()
# Files associated to a FileField have to be manually deleted:
# https://docs.djangoproject.com/en/dev/releases/1.3/#deleting-a-model-doesn-t-delete-associated-files
# http://haineault.com/blog/147/
# The data loss scenarios mentioned in the docs hopefully do not apply
# to Photologue!
super(ImageModel, self).delete()
self.image.storage.delete(self.image.name)
@python_2_unicode_compatible
class Photo(ImageModel):
title = models.CharField(_('title'),
max_length=250,
unique=True)
slug = models.SlugField(_('slug'),
unique=True,
max_length=250,
help_text=_('A "slug" is a unique URL-friendly title for an object.'))
caption = models.TextField(_('caption'),
blank=True)
date_added = models.DateTimeField(_('date added'),
default=now)
is_public = models.BooleanField(_('is public'),
default=True,
help_text=_('Public photographs will be displayed in the default views.'))
sites = models.ManyToManyField(Site, verbose_name=_(u'sites'),
blank=True)
objects = PhotoQuerySet.as_manager()
class Meta:
ordering = ['-date_added']
get_latest_by = 'date_added'
verbose_name = _("photo")
verbose_name_plural = _("photos")
def __str__(self):
return self.title
def save(self, *args, **kwargs):
if self.slug is None:
self.slug = slugify(self.title)
super(Photo, self).save(*args, **kwargs)
def get_absolute_url(self):
return reverse('photologue:pl-photo', args=[self.slug])
def public_galleries(self):
"""Return the public galleries to which this photo belongs."""
return self.galleries.filter(is_public=True)
def get_previous_in_gallery(self, gallery):
"""Find the neighbour of this photo in the supplied gallery.
We assume that the gallery and all its photos are on the same site.
"""
if not self.is_public:
raise ValueError('Cannot determine neighbours of a non-public photo.')
photos = gallery.photos.is_public()
if self not in photos:
raise ValueError('Photo does not belong to gallery.')
previous = None
for photo in photos:
if photo == self:
return previous
previous = photo
def get_next_in_gallery(self, gallery):
"""Find the neighbour of this photo in the supplied gallery.
We assume that the gallery and all its photos are on the same site.
"""
if not self.is_public:
raise ValueError('Cannot determine neighbours of a non-public photo.')
photos = gallery.photos.is_public()
if self not in photos:
raise ValueError('Photo does not belong to gallery.')
matched = False
for photo in photos:
if matched:
return photo
if photo == self:
matched = True
return None
@python_2_unicode_compatible
class BaseEffect(models.Model):
name = models.CharField(_('name'),
max_length=30,
unique=True)
description = models.TextField(_('description'),
blank=True)
class Meta:
abstract = True
def sample_dir(self):
return os.path.join(PHOTOLOGUE_DIR, 'samples')
def sample_url(self):
return settings.MEDIA_URL + '/'.join([PHOTOLOGUE_DIR, 'samples', '%s %s.jpg' % (self.name.lower(), 'sample')])
def sample_filename(self):
return os.path.join(self.sample_dir(), '%s %s.jpg' % (self.name.lower(), 'sample'))
def create_sample(self):
try:
im = Image.open(SAMPLE_IMAGE_PATH)
except IOError:
raise IOError(
'Photologue was unable to open the sample image: %s.' % SAMPLE_IMAGE_PATH)
im = self.process(im)
buffer = BytesIO()
im.save(buffer, 'JPEG', quality=90, optimize=True)
buffer_contents = ContentFile(buffer.getvalue())
default_storage.save(self.sample_filename(), buffer_contents)
def admin_sample(self):
return u'<img src="%s">' % self.sample_url()
admin_sample.short_description = 'Sample'
admin_sample.allow_tags = True
def pre_process(self, im):
return im
def post_process(self, im):
return im
def process(self, im):
im = self.pre_process(im)
im = self.post_process(im)
return im
def __str__(self):
return self.name
def save(self, *args, **kwargs):
try:
default_storage.delete(self.sample_filename())
        except Exception:
pass
models.Model.save(self, *args, **kwargs)
self.create_sample()
for size in self.photo_sizes.all():
size.clear_cache()
# try to clear all related subclasses of ImageModel
for prop in [prop for prop in dir(self) if prop[-8:] == '_related']:
for obj in getattr(self, prop).all():
obj.clear_cache()
obj.pre_cache()
def delete(self):
try:
default_storage.delete(self.sample_filename())
        except Exception:
pass
models.Model.delete(self)
class PhotoEffect(BaseEffect):
""" A pre-defined effect to apply to photos """
transpose_method = models.CharField(_('rotate or flip'),
max_length=15,
blank=True,
choices=IMAGE_TRANSPOSE_CHOICES)
color = models.FloatField(_('color'),
default=1.0,
help_text=_('A factor of 0.0 gives a black and white image, a factor of 1.0 gives the '
'original image.'))
brightness = models.FloatField(_('brightness'),
default=1.0,
help_text=_('A factor of 0.0 gives a black image, a factor of 1.0 gives the '
'original image.'))
contrast = models.FloatField(_('contrast'),
default=1.0,
help_text=_('A factor of 0.0 gives a solid grey image, a factor of 1.0 gives the '
'original image.'))
sharpness = models.FloatField(_('sharpness'),
default=1.0,
help_text=_('A factor of 0.0 gives a blurred image, a factor of 1.0 gives the '
'original image.'))
filters = models.CharField(_('filters'),
max_length=200,
blank=True,
help_text=_(IMAGE_FILTERS_HELP_TEXT))
reflection_size = models.FloatField(_('size'),
default=0,
                                        help_text=_('The height of the reflection as a percentage of the original '
                                                    'image. A factor of 0.0 adds no reflection, a factor of 1.0 adds a'
                                                    ' reflection equal to the height of the original image.'))
reflection_strength = models.FloatField(_('strength'),
default=0.6,
help_text=_('The initial opacity of the reflection gradient.'))
background_color = models.CharField(_('color'),
max_length=7,
default="#FFFFFF",
help_text=_('The background color of the reflection gradient. Set this to '
'match the background color of your page.'))
class Meta:
verbose_name = _("photo effect")
verbose_name_plural = _("photo effects")
def pre_process(self, im):
if self.transpose_method != '':
method = getattr(Image, self.transpose_method)
im = im.transpose(method)
if im.mode != 'RGB' and im.mode != 'RGBA':
return im
for name in ['Color', 'Brightness', 'Contrast', 'Sharpness']:
factor = getattr(self, name.lower())
if factor != 1.0:
im = getattr(ImageEnhance, name)(im).enhance(factor)
for name in self.filters.split('->'):
image_filter = getattr(ImageFilter, name.upper(), None)
if image_filter is not None:
try:
im = im.filter(image_filter)
except ValueError:
pass
return im
def post_process(self, im):
if self.reflection_size != 0.0:
im = add_reflection(im, bgcolor=self.background_color,
amount=self.reflection_size, opacity=self.reflection_strength)
return im
class Watermark(BaseEffect):
image = models.ImageField(_('image'),
upload_to=PHOTOLOGUE_DIR + "/watermarks")
style = models.CharField(_('style'),
max_length=5,
choices=WATERMARK_STYLE_CHOICES,
default='scale')
opacity = models.FloatField(_('opacity'),
default=1,
help_text=_("The opacity of the overlay."))
class Meta:
verbose_name = _('watermark')
verbose_name_plural = _('watermarks')
def delete(self):
assert self._get_pk_val() is not None, "%s object can't be deleted because its %s attribute is set to None." \
% (self._meta.object_name, self._meta.pk.attname)
super(Watermark, self).delete()
self.image.storage.delete(self.image.name)
def post_process(self, im):
mark = Image.open(self.image.storage.open(self.image.name))
return apply_watermark(im, mark, self.style, self.opacity)
@python_2_unicode_compatible
class PhotoSize(models.Model):
"""About the Photosize name: it's used to create get_PHOTOSIZE_url() methods,
so the name has to follow the same restrictions as any Python method name,
e.g. no spaces or non-ascii characters."""
name = models.CharField(_('name'),
max_length=40,
unique=True,
help_text=_(
'Photo size name should contain only letters, numbers and underscores. Examples: '
'"thumbnail", "display", "small", "main_page_widget".'),
validators=[RegexValidator(regex='^[a-z0-9_]+$',
message='Use only plain lowercase letters (ASCII), numbers and '
'underscores.'
)]
)
width = models.PositiveIntegerField(_('width'),
default=0,
help_text=_(
'If width is set to "0" the image will be scaled to the supplied height.'))
height = models.PositiveIntegerField(_('height'),
default=0,
help_text=_(
                                             'If height is set to "0" the image will be scaled to the supplied width.'))
quality = models.PositiveIntegerField(_('quality'),
choices=JPEG_QUALITY_CHOICES,
default=70,
help_text=_('JPEG image quality.'))
upscale = models.BooleanField(_('upscale images?'),
default=False,
help_text=_('If selected the image will be scaled up if necessary to fit the '
'supplied dimensions. Cropped sizes will be upscaled regardless of this '
'setting.')
)
crop = models.BooleanField(_('crop to fit?'),
default=False,
help_text=_('If selected the image will be scaled and cropped to fit the supplied '
'dimensions.'))
pre_cache = models.BooleanField(_('pre-cache?'),
default=False,
help_text=_('If selected this photo size will be pre-cached as photos are added.'))
increment_count = models.BooleanField(_('increment view count?'),
default=False,
help_text=_('If selected the image\'s "view_count" will be incremented when '
'this photo size is displayed.'))
effect = models.ForeignKey('photologue.PhotoEffect',
null=True,
blank=True,
related_name='photo_sizes',
verbose_name=_('photo effect'))
watermark = models.ForeignKey('photologue.Watermark',
null=True,
blank=True,
related_name='photo_sizes',
verbose_name=_('watermark image'))
class Meta:
ordering = ['width', 'height']
verbose_name = _('photo size')
verbose_name_plural = _('photo sizes')
def __str__(self):
return self.name
def clear_cache(self):
for cls in ImageModel.__subclasses__():
for obj in cls.objects.all():
obj.remove_size(self)
if self.pre_cache:
obj.create_size(self)
PhotoSizeCache().reset()
def clean(self):
if self.crop is True:
if self.width == 0 or self.height == 0:
raise ValidationError(
_("Can only crop photos if both width and height dimensions are set."))
def save(self, *args, **kwargs):
super(PhotoSize, self).save(*args, **kwargs)
PhotoSizeCache().reset()
self.clear_cache()
def delete(self):
assert self._get_pk_val() is not None, "%s object can't be deleted because its %s attribute is set to None." \
% (self._meta.object_name, self._meta.pk.attname)
self.clear_cache()
super(PhotoSize, self).delete()
def _get_size(self):
return (self.width, self.height)
def _set_size(self, value):
self.width, self.height = value
size = property(_get_size, _set_size)
class PhotoSizeCache(object):
__state = {"sizes": {}}
def __init__(self):
self.__dict__ = self.__state
if not len(self.sizes):
sizes = PhotoSize.objects.all()
for size in sizes:
self.sizes[size.name] = size
def reset(self):
global size_method_map
size_method_map = {}
self.sizes = {}
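# PhotoSizeCache relies on the shared-state ("Borg") pattern: each instance
# rebinds its __dict__ to one class-level dict, so every instance sees the
# same `sizes` mapping without being the same object. A minimal standalone
# sketch, with `_sketch_borg` a hypothetical name for illustration only:
def _sketch_borg():
    class _Borg:
        _state = {}
        def __init__(self):
            self.__dict__ = self._state
    a, b = _Borg(), _Borg()
    a.value = 1
    assert b.value == 1 and a is not b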
def init_size_method_map():
global size_method_map
for size in PhotoSizeCache().sizes.keys():
size_method_map['get_%s_size' % size] = \
{'base_name': '_get_SIZE_size', 'size': size}
size_method_map['get_%s_photosize' % size] = \
{'base_name': '_get_SIZE_photosize', 'size': size}
size_method_map['get_%s_url' % size] = \
{'base_name': '_get_SIZE_url', 'size': size}
size_method_map['get_%s_filename' % size] = \
{'base_name': '_get_SIZE_filename', 'size': size}
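# A short sketch of the dynamic-attribute scheme built above: a PhotoSize
# named "thumbnail" resolves get_thumbnail_size, get_thumbnail_photosize,
# get_thumbnail_url and get_thumbnail_filename through ImageModel.__getattr__,
# which curries the matching _get_SIZE_* method with size='thumbnail'.
# `_sketch_size_method_names` is a hypothetical helper for illustration.
def _sketch_size_method_names(size_name='thumbnail'):
    return ['get_%s_%s' % (size_name, kind)
            for kind in ('size', 'photosize', 'url', 'filename')]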
def add_default_site(instance, created, **kwargs):
"""
Called via Django's signals when an instance is created.
In case PHOTOLOGUE_MULTISITE is False, the current site (i.e.
``settings.SITE_ID``) will always be added to the site relations if none are
present.
"""
if not created:
return
if getattr(settings, 'PHOTOLOGUE_MULTISITE', False):
return
if instance.sites.exists():
return
instance.sites.add(Site.objects.get_current())
post_save.connect(add_default_site, sender=Gallery)
post_save.connect(add_default_site, sender=Photo)
|
fake-name/ReadableWebProxy | refs/heads/master | WebMirror/management/rss_parser_funcs/feed_parse_extractAcountrybetweenBlogspotCom.py | 1 |
def extractAcountrybetweenBlogspotCom(item):
'''
Parser for 'acountrybetween.blogspot.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
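# A standalone sketch of the tag-dispatch pattern above; the helpers
# extractVolChapterFragmentPostfix and buildReleaseMessageWithType come
# from the surrounding ReadableWebProxy codebase and are not reproduced
# here. `_sketch_tag_dispatch` is a hypothetical name for illustration.
def _sketch_tag_dispatch(item_tags):
    tagmap = [
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tagname, name, tl_type in tagmap:
        if tagname in item_tags:
            return (name, tl_type)
    return False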
|
xavierwu/scikit-learn | refs/heads/master | sklearn/tests/test_random_projection.py | 79 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.metrics import euclidean_distances
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import gaussian_random_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.random_projection import SparseRandomProjection
from sklearn.random_projection import GaussianRandomProjection
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils import DataDimensionalityWarning
all_sparse_random_matrix = [sparse_random_matrix]
all_dense_random_matrix = [gaussian_random_matrix]
all_random_matrix = set(all_sparse_random_matrix + all_dense_random_matrix)
all_SparseRandomProjection = [SparseRandomProjection]
all_DenseRandomProjection = [GaussianRandomProjection]
all_RandomProjection = set(all_SparseRandomProjection +
all_DenseRandomProjection)
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros):
rng = np.random.RandomState(0)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def densify(matrix):
if not sp.issparse(matrix):
return matrix
else:
return matrix.toarray()
n_samples, n_features = (10, 1000)
n_nonzeros = int(n_samples * n_features / 100.)
data, data_csr = make_sparse_random_data(n_samples, n_features, n_nonzeros)
###############################################################################
# test on JL lemma
###############################################################################
def test_invalid_jl_domain():
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 1.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 0.0)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, -0.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 0, 0.5)
def test_input_size_jl_min_dim():
    assert_raises(ValueError, johnson_lindenstrauss_min_dim,
                  3 * [100], 2 * [0.9])
johnson_lindenstrauss_min_dim(np.random.randint(1, 10, size=(10, 10)),
0.5 * np.ones((10, 10)))
###############################################################################
# tests random matrix generation
###############################################################################
def check_input_size_random_matrix(random_matrix):
assert_raises(ValueError, random_matrix, 0, 0)
assert_raises(ValueError, random_matrix, -1, 1)
assert_raises(ValueError, random_matrix, 1, -1)
assert_raises(ValueError, random_matrix, 1, 0)
assert_raises(ValueError, random_matrix, -1, 0)
def check_size_generated(random_matrix):
assert_equal(random_matrix(1, 5).shape, (1, 5))
assert_equal(random_matrix(5, 1).shape, (5, 1))
assert_equal(random_matrix(5, 5).shape, (5, 5))
assert_equal(random_matrix(1, 1).shape, (1, 1))
def check_zero_mean_and_unit_norm(random_matrix):
# All random matrix should produce a transformation matrix
# with zero mean and unit norm for each columns
A = densify(random_matrix(10000, 1, random_state=0))
assert_array_almost_equal(0, np.mean(A), 3)
assert_array_almost_equal(1.0, np.linalg.norm(A), 1)
def check_input_with_sparse_random_matrix(random_matrix):
n_components, n_features = 5, 10
for density in [-1., 0.0, 1.1]:
assert_raises(ValueError,
random_matrix, n_components, n_features, density=density)
def test_basic_property_of_random_matrix():
# Check basic properties of random matrix generation
for random_matrix in all_random_matrix:
yield check_input_size_random_matrix, random_matrix
yield check_size_generated, random_matrix
yield check_zero_mean_and_unit_norm, random_matrix
for random_matrix in all_sparse_random_matrix:
yield check_input_with_sparse_random_matrix, random_matrix
random_matrix_dense = \
lambda n_components, n_features, random_state: random_matrix(
n_components, n_features, random_state=random_state,
density=1.0)
yield check_zero_mean_and_unit_norm, random_matrix_dense
def test_gaussian_random_matrix():
    # Check some statistical properties of Gaussian random matrix
    # Check that the random matrix follows the proper distribution.
# Let's say that each element of a_{ij} of A is taken from
# a_ij ~ N(0.0, 1 / n_components).
#
n_components = 100
n_features = 1000
A = gaussian_random_matrix(n_components, n_features, random_state=0)
assert_array_almost_equal(0.0, np.mean(A), 2)
assert_array_almost_equal(np.var(A, ddof=1), 1 / n_components, 1)
def test_sparse_random_matrix():
    # Check some statistical properties of sparse random matrix
n_components = 100
n_features = 500
for density in [0.3, 1.]:
s = 1 / density
A = sparse_random_matrix(n_components,
n_features,
density=density,
random_state=0)
A = densify(A)
# Check possible values
values = np.unique(A)
assert_in(np.sqrt(s) / np.sqrt(n_components), values)
assert_in(- np.sqrt(s) / np.sqrt(n_components), values)
if density == 1.0:
assert_equal(np.size(values), 2)
else:
assert_in(0., values)
assert_equal(np.size(values), 3)
        # Check that the random matrix follows the proper distribution.
# Let's say that each element of a_{ij} of A is taken from
#
# - -sqrt(s) / sqrt(n_components) with probability 1 / 2s
# - 0 with probability 1 - 1 / s
# - +sqrt(s) / sqrt(n_components) with probability 1 / 2s
#
assert_almost_equal(np.mean(A == 0.0),
1 - 1 / s, decimal=2)
assert_almost_equal(np.mean(A == np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.mean(A == - np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == 0.0, ddof=1),
(1 - 1 / s) * 1 / s, decimal=2)
assert_almost_equal(np.var(A == np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == - np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
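# Numeric sketch of the distribution checked above: with density 0.3 the
# scale is s = 1 / 0.3 ~= 3.33, so each sign of non-zero appears with
# probability 1 / (2 * s) = 0.15 and zeros with probability 1 - 1 / s = 0.7.
# `_sketch_sparse_probabilities` is a hypothetical helper for illustration.
def _sketch_sparse_probabilities(density=0.3):
    s = 1 / density
    return (1 - 1 / s, 1 / (2 * s))  # approximately (0.7, 0.15)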
###############################################################################
# tests on random projection transformer
###############################################################################
def test_sparse_random_projection_transformer_invalid_density():
for RandomProjection in all_SparseRandomProjection:
assert_raises(ValueError,
RandomProjection(density=1.1).fit, data)
assert_raises(ValueError,
RandomProjection(density=0).fit, data)
assert_raises(ValueError,
RandomProjection(density=-0.1).fit, data)
def test_random_projection_transformer_invalid_input():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').fit, [[0, 1, 2]])
assert_raises(ValueError,
RandomProjection(n_components=-10).fit, data)
def test_try_to_transform_before_fit():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').transform, data)
def test_too_many_samples_to_find_a_safe_embedding():
data, _ = make_sparse_random_data(1000, 100, 1000)
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=0.1)
expected_msg = (
'eps=0.100000 and n_samples=1000 lead to a target dimension'
' of 5920 which is larger than the original space with'
' n_features=100')
assert_raise_message(ValueError, expected_msg, rp.fit, data)
def test_random_projection_embedding_quality():
data, _ = make_sparse_random_data(8, 5000, 15000)
eps = 0.2
original_distances = euclidean_distances(data, squared=True)
original_distances = original_distances.ravel()
non_identical = original_distances != 0.0
# remove 0 distances to avoid division by 0
original_distances = original_distances[non_identical]
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=eps, random_state=0)
projected = rp.fit_transform(data)
projected_distances = euclidean_distances(projected, squared=True)
projected_distances = projected_distances.ravel()
# remove 0 distances to avoid division by 0
projected_distances = projected_distances[non_identical]
distances_ratio = projected_distances / original_distances
# check that the automatically tuned values for the density respect the
# contract for eps: pairwise distances are preserved according to the
# Johnson-Lindenstrauss lemma
assert_less(distances_ratio.max(), 1 + eps)
assert_less(1 - eps, distances_ratio.min())
def test_SparseRandomProjection_output_representation():
for SparseRandomProjection in all_SparseRandomProjection:
# when using sparse input, the projected data can be forced to be a
# dense numpy array
rp = SparseRandomProjection(n_components=10, dense_output=True,
random_state=0)
rp.fit(data)
assert isinstance(rp.transform(data), np.ndarray)
sparse_data = sp.csr_matrix(data)
assert isinstance(rp.transform(sparse_data), np.ndarray)
# the output can be left to a sparse matrix instead
rp = SparseRandomProjection(n_components=10, dense_output=False,
random_state=0)
rp = rp.fit(data)
# output for dense input will stay dense:
assert isinstance(rp.transform(data), np.ndarray)
# output for sparse output will be sparse:
assert sp.issparse(rp.transform(sparse_data))
def test_correct_RandomProjection_dimensions_embedding():
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto',
random_state=0,
eps=0.5).fit(data)
# the number of components is adjusted from the shape of the training
# set
assert_equal(rp.n_components, 'auto')
assert_equal(rp.n_components_, 110)
if RandomProjection in all_SparseRandomProjection:
assert_equal(rp.density, 'auto')
assert_almost_equal(rp.density_, 0.03, 2)
assert_equal(rp.components_.shape, (110, n_features))
projected_1 = rp.transform(data)
assert_equal(projected_1.shape, (n_samples, 110))
# once the RP is 'fitted' the projection is always the same
projected_2 = rp.transform(data)
assert_array_equal(projected_1, projected_2)
# fit transform with same random seed will lead to the same results
rp2 = RandomProjection(random_state=0, eps=0.5)
projected_3 = rp2.fit_transform(data)
assert_array_equal(projected_1, projected_3)
# Try to transform with an input X of size different from fitted.
assert_raises(ValueError, rp.transform, data[:, 1:5])
# it is also possible to fix the number of components and the density
# level
if RandomProjection in all_SparseRandomProjection:
rp = RandomProjection(n_components=100, density=0.001,
random_state=0)
projected = rp.fit_transform(data)
assert_equal(projected.shape, (n_samples, 100))
assert_equal(rp.components_.shape, (100, n_features))
assert_less(rp.components_.nnz, 115) # close to 1% density
assert_less(85, rp.components_.nnz) # close to 1% density
def test_warning_n_components_greater_than_n_features():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
assert_warns(DataDimensionalityWarning,
RandomProjection(n_components=n_features + 1).fit, data)
def test_works_with_sparse_data():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
rp_dense = RandomProjection(n_components=3,
random_state=1).fit(data)
rp_sparse = RandomProjection(n_components=3,
random_state=1).fit(sp.csr_matrix(data))
assert_array_almost_equal(densify(rp_dense.components_),
densify(rp_sparse.components_))
|
charris/numpy | refs/heads/dependabot/pip/mypy-0.910 | numpy/core/einsumfunc.py | 9 | """
Implementation of optimized einsum.
"""
import itertools
import operator
from numpy.core.multiarray import c_einsum
from numpy.core.numeric import asanyarray, tensordot
from numpy.core.overrides import array_function_dispatch
__all__ = ['einsum', 'einsum_path']
einsum_symbols = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
einsum_symbols_set = set(einsum_symbols)
def _flop_count(idx_contraction, inner, num_terms, size_dictionary):
"""
Computes the number of FLOPS in the contraction.
Parameters
----------
idx_contraction : iterable
The indices involved in the contraction
inner : bool
Does this contraction require an inner product?
num_terms : int
The number of terms in a contraction
size_dictionary : dict
The size of each of the indices in idx_contraction
Returns
-------
flop_count : int
The total number of FLOPS required for the contraction.
Examples
--------
>>> _flop_count('abc', False, 1, {'a': 2, 'b':3, 'c':5})
30
>>> _flop_count('abc', True, 2, {'a': 2, 'b':3, 'c':5})
60
"""
overall_size = _compute_size_by_dict(idx_contraction, size_dictionary)
op_factor = max(1, num_terms - 1)
if inner:
op_factor += 1
return overall_size * op_factor
def _compute_size_by_dict(indices, idx_dict):
"""
Computes the product of the elements in indices based on the dictionary
idx_dict.
Parameters
----------
indices : iterable
Indices to base the product on.
idx_dict : dictionary
Dictionary of index sizes
Returns
-------
ret : int
The resulting product.
Examples
--------
>>> _compute_size_by_dict('abbc', {'a': 2, 'b':3, 'c':5})
90
"""
ret = 1
for i in indices:
ret *= idx_dict[i]
return ret
def _find_contraction(positions, input_sets, output_set):
"""
Finds the contraction for a given set of input and output sets.
Parameters
----------
positions : iterable
Integer positions of terms used in the contraction.
input_sets : list
List of sets that represent the lhs side of the einsum subscript
output_set : set
Set that represents the rhs side of the overall einsum subscript
Returns
-------
new_result : set
The indices of the resulting contraction
remaining : list
List of sets that have not been contracted, the new set is appended to
the end of this list
idx_removed : set
Indices removed from the entire contraction
idx_contraction : set
The indices used in the current contraction
Examples
--------
# A simple dot product test case
>>> pos = (0, 1)
>>> isets = [set('ab'), set('bc')]
>>> oset = set('ac')
>>> _find_contraction(pos, isets, oset)
({'a', 'c'}, [{'a', 'c'}], {'b'}, {'a', 'b', 'c'})
# A more complex case with additional terms in the contraction
>>> pos = (0, 2)
>>> isets = [set('abd'), set('ac'), set('bdc')]
>>> oset = set('ac')
>>> _find_contraction(pos, isets, oset)
({'a', 'c'}, [{'a', 'c'}, {'a', 'c'}], {'b', 'd'}, {'a', 'b', 'c', 'd'})
"""
idx_contract = set()
idx_remain = output_set.copy()
remaining = []
for ind, value in enumerate(input_sets):
if ind in positions:
idx_contract |= value
else:
remaining.append(value)
idx_remain |= value
new_result = idx_remain & idx_contract
idx_removed = (idx_contract - new_result)
remaining.append(new_result)
return (new_result, remaining, idx_removed, idx_contract)
def _optimal_path(input_sets, output_set, idx_dict, memory_limit):
"""
Computes all possible pair contractions, sieves the results based
on ``memory_limit`` and returns the lowest cost path. This algorithm
scales factorial with respect to the elements in the list ``input_sets``.
Parameters
----------
input_sets : list
List of sets that represent the lhs side of the einsum subscript
output_set : set
Set that represents the rhs side of the overall einsum subscript
idx_dict : dictionary
Dictionary of index sizes
memory_limit : int
The maximum number of elements in a temporary array
Returns
-------
path : list
The optimal contraction order within the memory limit constraint.
Examples
--------
>>> isets = [set('abd'), set('ac'), set('bdc')]
>>> oset = set()
>>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
>>> _optimal_path(isets, oset, idx_sizes, 5000)
[(0, 2), (0, 1)]
"""
full_results = [(0, [], input_sets)]
for iteration in range(len(input_sets) - 1):
iter_results = []
# Compute all unique pairs
for curr in full_results:
cost, positions, remaining = curr
for con in itertools.combinations(range(len(input_sets) - iteration), 2):
# Find the contraction
cont = _find_contraction(con, remaining, output_set)
new_result, new_input_sets, idx_removed, idx_contract = cont
# Sieve the results based on memory_limit
new_size = _compute_size_by_dict(new_result, idx_dict)
if new_size > memory_limit:
continue
# Build (total_cost, positions, indices_remaining)
total_cost = cost + _flop_count(idx_contract, idx_removed, len(con), idx_dict)
new_pos = positions + [con]
iter_results.append((total_cost, new_pos, new_input_sets))
# Update combinatorial list, if we did not find anything return best
# path + remaining contractions
if iter_results:
full_results = iter_results
else:
path = min(full_results, key=lambda x: x[0])[1]
path += [tuple(range(len(input_sets) - iteration))]
return path
# If we have not found anything return single einsum contraction
if len(full_results) == 0:
return [tuple(range(len(input_sets)))]
path = min(full_results, key=lambda x: x[0])[1]
return path
def _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit, path_cost, naive_cost):
"""Compute the cost (removed size + flops) and resultant indices for
performing the contraction specified by ``positions``.
Parameters
----------
positions : tuple of int
The locations of the proposed tensors to contract.
input_sets : list of sets
The indices found on each tensors.
output_set : set
The output indices of the expression.
idx_dict : dict
Mapping of each index to its size.
memory_limit : int
The total allowed size for an intermediary tensor.
path_cost : int
The contraction cost so far.
naive_cost : int
The cost of the unoptimized expression.
Returns
-------
cost : (int, int)
A tuple containing the size of any indices removed, and the flop cost.
positions : tuple of int
The locations of the proposed tensors to contract.
new_input_sets : list of sets
The resulting new list of indices if this proposed contraction is performed.
"""
# Find the contraction
contract = _find_contraction(positions, input_sets, output_set)
idx_result, new_input_sets, idx_removed, idx_contract = contract
# Sieve the results based on memory_limit
new_size = _compute_size_by_dict(idx_result, idx_dict)
if new_size > memory_limit:
return None
# Build sort tuple
old_sizes = (_compute_size_by_dict(input_sets[p], idx_dict) for p in positions)
removed_size = sum(old_sizes) - new_size
# NB: removed_size used to be just the size of any removed indices i.e.:
# helpers.compute_size_by_dict(idx_removed, idx_dict)
cost = _flop_count(idx_contract, idx_removed, len(positions), idx_dict)
sort = (-removed_size, cost)
# Sieve based on total cost as well
if (path_cost + cost) > naive_cost:
return None
# Add contraction to possible choices
return [sort, positions, new_input_sets]
def _update_other_results(results, best):
"""Update the positions and provisional input_sets of ``results`` based on
performing the contraction result ``best``. Remove any involving the tensors
contracted.
Parameters
----------
results : list
List of contraction results produced by ``_parse_possible_contraction``.
best : list
The best contraction of ``results`` i.e. the one that will be performed.
Returns
-------
mod_results : list
The list of modified results, updated with outcome of ``best`` contraction.
"""
best_con = best[1]
bx, by = best_con
mod_results = []
for cost, (x, y), con_sets in results:
# Ignore results involving tensors just contracted
if x in best_con or y in best_con:
continue
# Update the input_sets
del con_sets[by - int(by > x) - int(by > y)]
del con_sets[bx - int(bx > x) - int(bx > y)]
con_sets.insert(-1, best[2][-1])
# Update the position indices
mod_con = x - int(x > bx) - int(x > by), y - int(y > bx) - int(y > by)
mod_results.append((cost, mod_con, con_sets))
return mod_results
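# A worked sketch of the index-shift arithmetic above: after contracting
# positions bx=1 and by=3, an untouched tensor previously at position 4
# moves down past both, landing at 4 - int(4 > 1) - int(4 > 3) == 2.
# `_sketch_index_shift` is a hypothetical helper for illustration only.
def _sketch_index_shift():
    bx, by, x = 1, 3, 4
    assert x - int(x > bx) - int(x > by) == 2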
def _greedy_path(input_sets, output_set, idx_dict, memory_limit):
"""
Finds the path by contracting the best pair until the input list is
exhausted. The best pair is found by minimizing the tuple
``(-prod(indices_removed), cost)``. What this amounts to is prioritizing
matrix multiplication or inner product operations, then Hadamard like
operations, and finally outer operations. Outer products are limited by
``memory_limit``. This algorithm scales cubically with respect to the
number of elements in the list ``input_sets``.
Parameters
----------
input_sets : list
List of sets that represent the lhs side of the einsum subscript
output_set : set
Set that represents the rhs side of the overall einsum subscript
idx_dict : dictionary
Dictionary of index sizes
memory_limit : int
The maximum number of elements in a temporary array
Returns
-------
path : list
The greedy contraction order within the memory limit constraint.
Examples
--------
>>> isets = [set('abd'), set('ac'), set('bdc')]
>>> oset = set()
>>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
>>> _greedy_path(isets, oset, idx_sizes, 5000)
[(0, 2), (0, 1)]
"""
# Handle trivial cases that leaked through
if len(input_sets) == 1:
return [(0,)]
elif len(input_sets) == 2:
return [(0, 1)]
# Build up a naive cost
contract = _find_contraction(range(len(input_sets)), input_sets, output_set)
idx_result, new_input_sets, idx_removed, idx_contract = contract
naive_cost = _flop_count(idx_contract, idx_removed, len(input_sets), idx_dict)
# Initially iterate over all pairs
comb_iter = itertools.combinations(range(len(input_sets)), 2)
known_contractions = []
path_cost = 0
path = []
for iteration in range(len(input_sets) - 1):
# Iterate over all pairs on first step, only previously found pairs on subsequent steps
for positions in comb_iter:
# Always initially ignore outer products
if input_sets[positions[0]].isdisjoint(input_sets[positions[1]]):
continue
result = _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit, path_cost,
naive_cost)
if result is not None:
known_contractions.append(result)
        # If we do not have an inner contraction, rescan pairs including outer products
if len(known_contractions) == 0:
# Then check the outer products
for positions in itertools.combinations(range(len(input_sets)), 2):
result = _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit,
path_cost, naive_cost)
if result is not None:
known_contractions.append(result)
# If we still did not find any remaining contractions, default back to einsum like behavior
if len(known_contractions) == 0:
path.append(tuple(range(len(input_sets))))
break
# Sort based on first index
best = min(known_contractions, key=lambda x: x[0])
# Now propagate as many unused contractions as possible to next iteration
known_contractions = _update_other_results(known_contractions, best)
# Next iteration only compute contractions with the new tensor
# All other contractions have been accounted for
input_sets = best[2]
new_tensor_pos = len(input_sets) - 1
comb_iter = ((i, new_tensor_pos) for i in range(new_tensor_pos))
# Update path and total cost
path.append(best[1])
path_cost += best[0][1]
return path
def _can_dot(inputs, result, idx_removed):
"""
Checks if we can use BLAS (np.tensordot) call and its beneficial to do so.
Parameters
----------
inputs : list of str
Specifies the subscripts for summation.
result : str
Resulting summation.
idx_removed : set
Indices that are removed in the summation
Returns
-------
type : bool
Returns true if BLAS should and can be used, else False
Notes
-----
    If the operation is BLAS level 1 or 2 and the data is not already
    aligned, we default back to einsum, as the memory movement needed to
    copy is more costly than the operation itself.
Examples
--------
# Standard GEMM operation
>>> _can_dot(['ij', 'jk'], 'ik', set('j'))
True
# Can use the standard BLAS, but requires odd data movement
>>> _can_dot(['ijj', 'jk'], 'ik', set('j'))
False
# DDOT where the memory is not aligned
>>> _can_dot(['ijk', 'ikj'], '', set('ijk'))
False
"""
# All `dot` calls remove indices
if len(idx_removed) == 0:
return False
# BLAS can only handle two operands
if len(inputs) != 2:
return False
input_left, input_right = inputs
for c in set(input_left + input_right):
# can't deal with repeated indices on same input or more than 2 total
nl, nr = input_left.count(c), input_right.count(c)
if (nl > 1) or (nr > 1) or (nl + nr > 2):
return False
# can't do implicit summation or dimension collapse e.g.
# "ab,bc->c" (implicitly sum over 'a')
# "ab,ca->ca" (take diagonal of 'a')
if nl + nr - 1 == int(c in result):
return False
# Build a few temporaries
set_left = set(input_left)
set_right = set(input_right)
keep_left = set_left - idx_removed
keep_right = set_right - idx_removed
rs = len(idx_removed)
# At this point we are a DOT, GEMV, or GEMM operation
# Handle inner products
# DDOT with aligned data
if input_left == input_right:
return True
# DDOT without aligned data (better to use einsum)
if set_left == set_right:
return False
# Handle the 4 possible (aligned) GEMV or GEMM cases
# GEMM or GEMV no transpose
if input_left[-rs:] == input_right[:rs]:
return True
# GEMM or GEMV transpose both
if input_left[:rs] == input_right[-rs:]:
return True
# GEMM or GEMV transpose right
if input_left[-rs:] == input_right[-rs:]:
return True
# GEMM or GEMV transpose left
if input_left[:rs] == input_right[:rs]:
return True
# Einsum is faster than GEMV if we have to copy data
if not keep_left or not keep_right:
return False
# We are a matrix-matrix product, but we need to copy data
return True
def _parse_einsum_input(operands):
"""
A reproduction of einsum c side einsum parsing in python.
Returns
-------
input_strings : str
Parsed input strings
output_string : str
Parsed output string
operands : list of array_like
The operands to use in the numpy contraction
Examples
--------
The operand list is simplified to reduce printing:
>>> np.random.seed(123)
>>> a = np.random.rand(4, 4)
>>> b = np.random.rand(4, 4, 4)
>>> _parse_einsum_input(('...a,...a->...', a, b))
('za,xza', 'xz', [a, b]) # may vary
>>> _parse_einsum_input((a, [Ellipsis, 0], b, [Ellipsis, 0]))
('za,xza', 'xz', [a, b]) # may vary
"""
if len(operands) == 0:
raise ValueError("No input operands")
if isinstance(operands[0], str):
subscripts = operands[0].replace(" ", "")
operands = [asanyarray(v) for v in operands[1:]]
# Ensure all characters are valid
for s in subscripts:
if s in '.,->':
continue
if s not in einsum_symbols:
raise ValueError("Character %s is not a valid symbol." % s)
else:
tmp_operands = list(operands)
operand_list = []
subscript_list = []
for p in range(len(operands) // 2):
operand_list.append(tmp_operands.pop(0))
subscript_list.append(tmp_operands.pop(0))
output_list = tmp_operands[-1] if len(tmp_operands) else None
operands = [asanyarray(v) for v in operand_list]
subscripts = ""
last = len(subscript_list) - 1
for num, sub in enumerate(subscript_list):
for s in sub:
if s is Ellipsis:
subscripts += "..."
else:
try:
s = operator.index(s)
except TypeError as e:
raise TypeError("For this input type lists must contain "
"either int or Ellipsis") from e
subscripts += einsum_symbols[s]
if num != last:
subscripts += ","
if output_list is not None:
subscripts += "->"
for s in output_list:
if s is Ellipsis:
subscripts += "..."
else:
try:
s = operator.index(s)
except TypeError as e:
raise TypeError("For this input type lists must contain "
"either int or Ellipsis") from e
subscripts += einsum_symbols[s]
# Check for proper "->"
if ("-" in subscripts) or (">" in subscripts):
invalid = (subscripts.count("-") > 1) or (subscripts.count(">") > 1)
if invalid or (subscripts.count("->") != 1):
raise ValueError("Subscripts can only contain one '->'.")
# Parse ellipses
if "." in subscripts:
used = subscripts.replace(".", "").replace(",", "").replace("->", "")
unused = list(einsum_symbols_set - set(used))
ellipse_inds = "".join(unused)
longest = 0
if "->" in subscripts:
input_tmp, output_sub = subscripts.split("->")
split_subscripts = input_tmp.split(",")
out_sub = True
else:
split_subscripts = subscripts.split(',')
out_sub = False
for num, sub in enumerate(split_subscripts):
if "." in sub:
if (sub.count(".") != 3) or (sub.count("...") != 1):
raise ValueError("Invalid Ellipses.")
# Take into account numerical values
if operands[num].shape == ():
ellipse_count = 0
else:
ellipse_count = max(operands[num].ndim, 1)
ellipse_count -= (len(sub) - 3)
if ellipse_count > longest:
longest = ellipse_count
if ellipse_count < 0:
raise ValueError("Ellipses lengths do not match.")
elif ellipse_count == 0:
split_subscripts[num] = sub.replace('...', '')
else:
rep_inds = ellipse_inds[-ellipse_count:]
split_subscripts[num] = sub.replace('...', rep_inds)
subscripts = ",".join(split_subscripts)
if longest == 0:
out_ellipse = ""
else:
out_ellipse = ellipse_inds[-longest:]
if out_sub:
subscripts += "->" + output_sub.replace("...", out_ellipse)
else:
# Special care for outputless ellipses
output_subscript = ""
tmp_subscripts = subscripts.replace(",", "")
for s in sorted(set(tmp_subscripts)):
if s not in einsum_symbols:
raise ValueError("Character %s is not a valid symbol." % s)
if tmp_subscripts.count(s) == 1:
output_subscript += s
normal_inds = ''.join(sorted(set(output_subscript) -
set(out_ellipse)))
subscripts += "->" + out_ellipse + normal_inds
# Build output string if it does not exist
if "->" in subscripts:
input_subscripts, output_subscript = subscripts.split("->")
else:
input_subscripts = subscripts
# Build output subscripts
tmp_subscripts = subscripts.replace(",", "")
output_subscript = ""
for s in sorted(set(tmp_subscripts)):
if s not in einsum_symbols:
raise ValueError("Character %s is not a valid symbol." % s)
if tmp_subscripts.count(s) == 1:
output_subscript += s
# Make sure output subscripts are in the input
for char in output_subscript:
if char not in input_subscripts:
raise ValueError("Output character %s did not appear in the input"
% char)
# Make sure the number of operands matches the number of terms
if len(input_subscripts.split(',')) != len(operands):
raise ValueError("Number of einsum subscripts must be equal to the "
"number of operands.")
return (input_subscripts, output_subscript, operands)
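# Illustrative sketch (not part of the original module): explicit mode
# passes the requested output through, while implicit mode rebuilds it
# from the labels that appear exactly once, sorted alphabetically:
#
#   >>> a = np.ones((2, 3)); b = np.ones((3, 4))
#   >>> _parse_einsum_input(('ij,jk->ik', a, b))[:2]
#   ('ij,jk', 'ik')
#   >>> _parse_einsum_input(('ij,jk', a, b))[:2]
#   ('ij,jk', 'ik')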
def _einsum_path_dispatcher(*operands, optimize=None, einsum_call=None):
# NOTE: technically, we should only dispatch on array-like arguments, not
# subscripts (given as strings). But separating operands into
# arrays/subscripts is a little tricky/slow (given einsum's two supported
# signatures), so as a practical shortcut we dispatch on everything.
# Strings will be ignored for dispatching since they don't define
# __array_function__.
return operands
@array_function_dispatch(_einsum_path_dispatcher, module='numpy')
def einsum_path(*operands, optimize='greedy', einsum_call=False):
"""
einsum_path(subscripts, *operands, optimize='greedy')
Evaluates the lowest cost contraction order for an einsum expression by
considering the creation of intermediate arrays.
Parameters
----------
subscripts : str
Specifies the subscripts for summation.
*operands : list of array_like
These are the arrays for the operation.
optimize : {bool, list, tuple, 'greedy', 'optimal'}
Choose the type of path. If a tuple is provided, the second argument is
assumed to be the maximum intermediate size created. If only a single
argument is provided the largest input or output array size is used
as a maximum intermediate size.
* if a list is given that starts with ``einsum_path``, uses this as the
contraction path
* if False no optimization is taken
* if True defaults to the 'greedy' algorithm
* 'optimal' An algorithm that combinatorially explores all possible
ways of contracting the listed tensors and chooses the least costly
path. Scales exponentially with the number of terms in the
contraction.
* 'greedy' An algorithm that chooses the best pair contraction
at each step. Effectively, this algorithm searches the largest inner,
Hadamard, and then outer products at each step. Scales cubically with
the number of terms in the contraction. Equivalent to the 'optimal'
path for most contractions.
Default is 'greedy'.
Returns
-------
path : list of tuples
A list representation of the einsum path.
string_repr : str
A printable representation of the einsum path.
Notes
-----
The resulting path indicates which terms of the input contraction should be
contracted first, the result of this contraction is then appended to the
end of the contraction list. This list can then be iterated over until all
intermediate contractions are complete.
See Also
--------
einsum, linalg.multi_dot
Examples
--------
We can begin with a chain dot example. In this case, it is optimal to
contract the ``b`` and ``c`` tensors first as represented by the first
element of the path ``(1, 2)``. The resulting tensor is added to the end
of the contraction and the remaining contraction ``(0, 1)`` is then
completed.
>>> np.random.seed(123)
>>> a = np.random.rand(2, 2)
>>> b = np.random.rand(2, 5)
>>> c = np.random.rand(5, 2)
>>> path_info = np.einsum_path('ij,jk,kl->il', a, b, c, optimize='greedy')
>>> print(path_info[0])
['einsum_path', (1, 2), (0, 1)]
>>> print(path_info[1])
Complete contraction: ij,jk,kl->il # may vary
Naive scaling: 4
Optimized scaling: 3
Naive FLOP count: 1.600e+02
Optimized FLOP count: 5.600e+01
Theoretical speedup: 2.857
Largest intermediate: 4.000e+00 elements
-------------------------------------------------------------------------
scaling current remaining
-------------------------------------------------------------------------
3 kl,jk->jl ij,jl->il
3 jl,ij->il il->il
A more complex index transformation example.
>>> I = np.random.rand(10, 10, 10, 10)
>>> C = np.random.rand(10, 10)
>>> path_info = np.einsum_path('ea,fb,abcd,gc,hd->efgh', C, C, I, C, C,
... optimize='greedy')
>>> print(path_info[0])
['einsum_path', (0, 2), (0, 3), (0, 2), (0, 1)]
>>> print(path_info[1])
Complete contraction: ea,fb,abcd,gc,hd->efgh # may vary
Naive scaling: 8
Optimized scaling: 5
Naive FLOP count: 8.000e+08
Optimized FLOP count: 8.000e+05
Theoretical speedup: 1000.000
Largest intermediate: 1.000e+04 elements
--------------------------------------------------------------------------
scaling current remaining
--------------------------------------------------------------------------
5 abcd,ea->bcde fb,gc,hd,bcde->efgh
5 bcde,fb->cdef gc,hd,cdef->efgh
5 cdef,gc->defg hd,defg->efgh
5 defg,hd->efgh efgh->efgh
"""
# Figure out what the path really is
path_type = optimize
if path_type is True:
path_type = 'greedy'
if path_type is None:
path_type = False
memory_limit = None
# No optimization or a named path algorithm
if (path_type is False) or isinstance(path_type, str):
pass
# Given an explicit path
elif len(path_type) and (path_type[0] == 'einsum_path'):
pass
# Path tuple with memory limit
elif ((len(path_type) == 2) and isinstance(path_type[0], str) and
isinstance(path_type[1], (int, float))):
memory_limit = int(path_type[1])
path_type = path_type[0]
else:
raise TypeError("Did not understand the path: %s" % str(path_type))
# Hidden option, only einsum should call this
einsum_call_arg = einsum_call
# Python side parsing
input_subscripts, output_subscript, operands = _parse_einsum_input(operands)
# Build a few useful list and sets
input_list = input_subscripts.split(',')
input_sets = [set(x) for x in input_list]
output_set = set(output_subscript)
indices = set(input_subscripts.replace(',', ''))
# Get length of each unique dimension and ensure all dimensions are correct
dimension_dict = {}
broadcast_indices = [[] for x in range(len(input_list))]
for tnum, term in enumerate(input_list):
sh = operands[tnum].shape
if len(sh) != len(term):
raise ValueError("Einstein sum subscript %s does not contain the "
"correct number of indices for operand %d."
% (term, tnum))
for cnum, char in enumerate(term):
dim = sh[cnum]
# Build out broadcast indices
if dim == 1:
broadcast_indices[tnum].append(char)
if char in dimension_dict.keys():
# For broadcasting cases we always want the largest dim size
if dimension_dict[char] == 1:
dimension_dict[char] = dim
elif dim not in (1, dimension_dict[char]):
raise ValueError("Size of label '%s' for operand %d (%d) "
"does not match previous terms (%d)."
% (char, tnum, dimension_dict[char], dim))
else:
dimension_dict[char] = dim
# Convert broadcast inds to sets
broadcast_indices = [set(x) for x in broadcast_indices]
# Compute size of each input array plus the output array
size_list = [_compute_size_by_dict(term, dimension_dict)
for term in input_list + [output_subscript]]
max_size = max(size_list)
if memory_limit is None:
memory_arg = max_size
else:
memory_arg = memory_limit
# Compute naive cost
# This isn't quite right, need to look into exactly how einsum does this
inner_product = (sum(len(x) for x in input_sets) - len(indices)) > 0
naive_cost = _flop_count(indices, inner_product, len(input_list), dimension_dict)
# Compute the path
if (path_type is False) or (len(input_list) in [1, 2]) or (indices == output_set):
# Nothing to be optimized, leave it to einsum
path = [tuple(range(len(input_list)))]
elif path_type == "greedy":
path = _greedy_path(input_sets, output_set, dimension_dict, memory_arg)
elif path_type == "optimal":
path = _optimal_path(input_sets, output_set, dimension_dict, memory_arg)
elif path_type[0] == 'einsum_path':
path = path_type[1:]
else:
raise KeyError("Path name %s not found" % path_type)
cost_list, scale_list, size_list, contraction_list = [], [], [], []
# Build contraction tuple (positions, gemm, einsum_str, remaining)
for cnum, contract_inds in enumerate(path):
# Make sure we remove inds from right to left
contract_inds = tuple(sorted(list(contract_inds), reverse=True))
contract = _find_contraction(contract_inds, input_sets, output_set)
out_inds, input_sets, idx_removed, idx_contract = contract
cost = _flop_count(idx_contract, idx_removed, len(contract_inds), dimension_dict)
cost_list.append(cost)
scale_list.append(len(idx_contract))
size_list.append(_compute_size_by_dict(out_inds, dimension_dict))
bcast = set()
tmp_inputs = []
for x in contract_inds:
tmp_inputs.append(input_list.pop(x))
bcast |= broadcast_indices.pop(x)
new_bcast_inds = bcast - idx_removed
# If we're broadcasting, nix blas
if not len(idx_removed & bcast):
do_blas = _can_dot(tmp_inputs, out_inds, idx_removed)
else:
do_blas = False
# Last contraction
if (cnum - len(path)) == -1:
idx_result = output_subscript
else:
sort_result = [(dimension_dict[ind], ind) for ind in out_inds]
idx_result = "".join([x[1] for x in sorted(sort_result)])
input_list.append(idx_result)
broadcast_indices.append(new_bcast_inds)
einsum_str = ",".join(tmp_inputs) + "->" + idx_result
contraction = (contract_inds, idx_removed, einsum_str, input_list[:], do_blas)
contraction_list.append(contraction)
opt_cost = sum(cost_list) + 1
if einsum_call_arg:
return (operands, contraction_list)
# Return the path along with a nice string representation
overall_contraction = input_subscripts + "->" + output_subscript
header = ("scaling", "current", "remaining")
speedup = naive_cost / opt_cost
max_i = max(size_list)
path_print = " Complete contraction: %s\n" % overall_contraction
path_print += " Naive scaling: %d\n" % len(indices)
path_print += " Optimized scaling: %d\n" % max(scale_list)
path_print += " Naive FLOP count: %.3e\n" % naive_cost
path_print += " Optimized FLOP count: %.3e\n" % opt_cost
path_print += " Theoretical speedup: %3.3f\n" % speedup
path_print += " Largest intermediate: %.3e elements\n" % max_i
path_print += "-" * 74 + "\n"
path_print += "%6s %24s %40s\n" % header
path_print += "-" * 74
for n, contraction in enumerate(contraction_list):
inds, idx_rm, einsum_str, remaining, blas = contraction
remaining_str = ",".join(remaining) + "->" + output_subscript
path_run = (scale_list[n], einsum_str, remaining_str)
path_print += "\n%4d %24s %40s" % path_run
path = ['einsum_path'] + path
return (path, path_print)
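# Illustrative usage sketch (not part of the original module): the path
# returned above can be passed back through ``optimize=`` so that
# repeated calls skip the path search entirely:
#
#   >>> path = np.einsum_path('ij,jk,kl->il', a, b, c)[0]  # doctest: +SKIP
#   >>> np.einsum('ij,jk,kl->il', a, b, c, optimize=path)  # doctest: +SKIP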
def _einsum_dispatcher(*operands, out=None, optimize=None, **kwargs):
# Arguably we dispatch on more arguments than we really should; see note in
# _einsum_path_dispatcher for why.
yield from operands
yield out
# Rewrite einsum to handle different cases
@array_function_dispatch(_einsum_dispatcher, module='numpy')
def einsum(*operands, out=None, optimize=False, **kwargs):
"""
einsum(subscripts, *operands, out=None, dtype=None, order='K',
casting='safe', optimize=False)
Evaluates the Einstein summation convention on the operands.
Using the Einstein summation convention, many common multi-dimensional,
linear algebraic array operations can be represented in a simple fashion.
In *implicit* mode `einsum` computes these values.
In *explicit* mode, `einsum` provides further flexibility to compute
other array operations that might not be considered classical Einstein
summation operations, by disabling, or forcing summation over specified
subscript labels.
See the notes and examples for clarification.
Parameters
----------
subscripts : str
Specifies the subscripts for summation as comma separated list of
subscript labels. An implicit (classical Einstein summation)
calculation is performed unless the explicit indicator '->' is
included as well as subscript labels of the precise output form.
operands : list of array_like
These are the arrays for the operation.
out : ndarray, optional
If provided, the calculation is done into this array.
dtype : {data-type, None}, optional
If provided, forces the calculation to use the data type specified.
Note that you may have to also give a more liberal `casting`
parameter to allow the conversions. Default is None.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the output. 'C' means it should
be C contiguous. 'F' means it should be Fortran contiguous,
'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
'K' means it should be as close to the layout of the inputs as
is possible, including arbitrarily permuted axes.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Setting this to
'unsafe' is not recommended, as it can adversely affect accumulations.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
Default is 'safe'.
optimize : {False, True, 'greedy', 'optimal'}, optional
Controls if intermediate optimization should occur. No optimization
will occur if False and True will default to the 'greedy' algorithm.
Also accepts an explicit contraction list from the ``np.einsum_path``
function. See ``np.einsum_path`` for more details. Defaults to False.
Returns
-------
output : ndarray
The calculation based on the Einstein summation convention.
See Also
--------
einsum_path, dot, inner, outer, tensordot, linalg.multi_dot
einops :
similar verbose interface is provided by
`einops <https://github.com/arogozhnikov/einops>`_ package to cover
additional operations: transpose, reshape/flatten, repeat/tile,
squeeze/unsqueeze and reductions.
opt_einsum :
`opt_einsum <https://optimized-einsum.readthedocs.io/en/stable/>`_
optimizes contraction order for einsum-like expressions
in backend-agnostic manner.
Notes
-----
.. versionadded:: 1.6.0
The Einstein summation convention can be used to compute
many multi-dimensional, linear algebraic array operations. `einsum`
provides a succinct way of representing these.
A non-exhaustive list of these operations,
which can be computed by `einsum`, is shown below along with examples:
* Trace of an array, :py:func:`numpy.trace`.
* Return a diagonal, :py:func:`numpy.diag`.
* Array axis summations, :py:func:`numpy.sum`.
* Transpositions and permutations, :py:func:`numpy.transpose`.
* Matrix multiplication and dot product, :py:func:`numpy.matmul` :py:func:`numpy.dot`.
* Vector inner and outer products, :py:func:`numpy.inner` :py:func:`numpy.outer`.
* Broadcasting, element-wise and scalar multiplication, :py:func:`numpy.multiply`.
* Tensor contractions, :py:func:`numpy.tensordot`.
* Chained array operations, in efficient calculation order, :py:func:`numpy.einsum_path`.
The subscripts string is a comma-separated list of subscript labels,
where each label refers to a dimension of the corresponding operand.
Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``
is equivalent to :py:func:`np.inner(a,b) <numpy.inner>`. If a label
appears only once, it is not summed, so ``np.einsum('i', a)`` produces a
view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)``
describes traditional matrix multiplication and is equivalent to
:py:func:`np.matmul(a,b) <numpy.matmul>`. Repeated subscript labels in one
operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent
to :py:func:`np.trace(a) <numpy.trace>`.
In *implicit mode*, the chosen subscripts are important
since the axes of the output are reordered alphabetically. This
means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
``np.einsum('ji', a)`` takes its transpose. Additionally,
``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while,
``np.einsum('ij,jh', a, b)`` returns the transpose of the
multiplication since subscript 'h' precedes subscript 'i'.
In *explicit mode* the output can be directly controlled by
specifying output subscript labels. This requires the
identifier '->' as well as the list of output subscript labels.
This feature increases the flexibility of the function since
summing can be disabled or forced when required. The call
``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <numpy.sum>`,
and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <numpy.diag>`.
The difference is that `einsum` does not allow broadcasting by default.
Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the
order of the output subscript labels and therefore returns matrix
multiplication, unlike the example above in implicit mode.
To enable and control broadcasting, use an ellipsis. Default
NumPy-style broadcasting is done by adding an ellipsis
to the left of each term, like ``np.einsum('...ii->...i', a)``.
To take the trace along the first and last axes,
you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
product with the left-most indices instead of rightmost, one can do
``np.einsum('ij...,jk...->ik...', a, b)``.
When there is only one operand, no axes are summed, and no output
parameter is provided, a view into the operand is returned instead
of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
produces a view (changed in version 1.10.0).
`einsum` also provides an alternative way to provide the subscripts
and operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``.
If the output shape is not provided in this format `einsum` will be
calculated in implicit mode, otherwise it will be performed explicitly.
The examples below have corresponding `einsum` calls with the two
parameter methods.
.. versionadded:: 1.10.0
Views returned from einsum are now writeable whenever the input array
is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now
have the same effect as :py:func:`np.swapaxes(a, 0, 2) <numpy.swapaxes>`
and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal
of a 2D array.
.. versionadded:: 1.12.0
Added the ``optimize`` argument which will optimize the contraction order
of an einsum expression. For a contraction with three or more operands this
can greatly increase the computational efficiency at the cost of a larger
memory footprint during computation.
Typically a 'greedy' algorithm is applied which empirical tests have shown
returns the optimal path in the majority of cases. In some cases 'optimal'
will return the superlative path through a more expensive, exhaustive search.
For iterative calculations it may be advisable to calculate the optimal path
once and reuse that path by supplying it as an argument. An example is given
below.
See :py:func:`numpy.einsum_path` for more details.
Examples
--------
>>> a = np.arange(25).reshape(5,5)
>>> b = np.arange(5)
>>> c = np.arange(6).reshape(2,3)
Trace of a matrix:
>>> np.einsum('ii', a)
60
>>> np.einsum(a, [0,0])
60
>>> np.trace(a)
60
Extract the diagonal (requires explicit form):
>>> np.einsum('ii->i', a)
array([ 0, 6, 12, 18, 24])
>>> np.einsum(a, [0,0], [0])
array([ 0, 6, 12, 18, 24])
>>> np.diag(a)
array([ 0, 6, 12, 18, 24])
Sum over an axis (requires explicit form):
>>> np.einsum('ij->i', a)
array([ 10, 35, 60, 85, 110])
>>> np.einsum(a, [0,1], [0])
array([ 10, 35, 60, 85, 110])
>>> np.sum(a, axis=1)
array([ 10, 35, 60, 85, 110])
For higher dimensional arrays summing a single axis can be done with ellipsis:
>>> np.einsum('...j->...', a)
array([ 10, 35, 60, 85, 110])
>>> np.einsum(a, [Ellipsis,1], [Ellipsis])
array([ 10, 35, 60, 85, 110])
Compute a matrix transpose, or reorder any number of axes:
>>> np.einsum('ji', c)
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum('ij->ji', c)
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum(c, [1,0])
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.transpose(c)
array([[0, 3],
[1, 4],
[2, 5]])
Vector inner products:
>>> np.einsum('i,i', b, b)
30
>>> np.einsum(b, [0], b, [0])
30
>>> np.inner(b,b)
30
Matrix vector multiplication:
>>> np.einsum('ij,j', a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum(a, [0,1], b, [1])
array([ 30, 80, 130, 180, 230])
>>> np.dot(a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum('...j,j', a, b)
array([ 30, 80, 130, 180, 230])
Broadcasting and scalar multiplication:
>>> np.einsum('..., ...', 3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum(',ij', 3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum(3, [Ellipsis], c, [Ellipsis])
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.multiply(3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
Vector outer product:
>>> np.einsum('i,j', np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.einsum(np.arange(2)+1, [0], b, [1])
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.outer(np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
Tensor contraction:
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> np.einsum('ijk,jil->kl', a, b)
array([[4400., 4730.],
[4532., 4874.],
[4664., 5018.],
[4796., 5162.],
[4928., 5306.]])
>>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3])
array([[4400., 4730.],
[4532., 4874.],
[4664., 5018.],
[4796., 5162.],
[4928., 5306.]])
>>> np.tensordot(a,b, axes=([1,0],[0,1]))
array([[4400., 4730.],
[4532., 4874.],
[4664., 5018.],
[4796., 5162.],
[4928., 5306.]])
Writeable returned arrays (since version 1.10.0):
>>> a = np.zeros((3, 3))
>>> np.einsum('ii->i', a)[:] = 1
>>> a
array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
Example of ellipsis use:
>>> a = np.arange(6).reshape((3,2))
>>> b = np.arange(12).reshape((4,3))
>>> np.einsum('ki,jk->ij', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> np.einsum('ki,...k->i...', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> np.einsum('k...,jk', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
Chained array operations. For more complicated contractions, speed ups
might be achieved by repeatedly computing a 'greedy' path or pre-computing the
'optimal' path and repeatedly applying it, using an
`einsum_path` insertion (since version 1.12.0). Performance improvements can be
particularly significant with larger arrays:
>>> a = np.ones(64).reshape(2,4,8)
Basic `einsum`: ~1520ms (benchmarked on 3.1GHz Intel i5.)
>>> for iteration in range(500):
... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a)
Sub-optimal `einsum` (due to repeated path calculation time): ~330ms
>>> for iteration in range(500):
... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal')
Greedy `einsum` (faster optimal path approximation): ~160ms
>>> for iteration in range(500):
... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='greedy')
Optimal `einsum` (best usage pattern in some use cases): ~110ms
>>> path = np.einsum_path('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal')[0]
>>> for iteration in range(500):
... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=path)
"""
# Special handling if out is specified
specified_out = out is not None
# If no optimization, run pure einsum
if optimize is False:
if specified_out:
kwargs['out'] = out
return c_einsum(*operands, **kwargs)
# Check the kwargs to avoid a more cryptic error later, without having to
# repeat default values here
valid_einsum_kwargs = ['dtype', 'order', 'casting']
unknown_kwargs = [k for (k, v) in kwargs.items() if
k not in valid_einsum_kwargs]
if len(unknown_kwargs):
raise TypeError("Did not understand the following kwargs: %s"
% unknown_kwargs)
# Build the contraction list and operand
operands, contraction_list = einsum_path(*operands, optimize=optimize,
einsum_call=True)
# Handle order kwarg for output array, c_einsum allows mixed case
output_order = kwargs.pop('order', 'K')
if output_order.upper() == 'A':
if all(arr.flags.f_contiguous for arr in operands):
output_order = 'F'
else:
output_order = 'C'
# Start contraction loop
for num, contraction in enumerate(contraction_list):
inds, idx_rm, einsum_str, remaining, blas = contraction
tmp_operands = [operands.pop(x) for x in inds]
# Do we need to deal with the output?
handle_out = specified_out and ((num + 1) == len(contraction_list))
# Call tensordot if still possible
if blas:
# Checks have already been handled
input_str, results_index = einsum_str.split('->')
input_left, input_right = input_str.split(',')
tensor_result = input_left + input_right
for s in idx_rm:
tensor_result = tensor_result.replace(s, "")
# Find indices to contract over
left_pos, right_pos = [], []
for s in sorted(idx_rm):
left_pos.append(input_left.find(s))
right_pos.append(input_right.find(s))
# Contract!
new_view = tensordot(*tmp_operands, axes=(tuple(left_pos), tuple(right_pos)))
# Build a new view if needed
if (tensor_result != results_index) or handle_out:
if handle_out:
kwargs["out"] = out
new_view = c_einsum(tensor_result + '->' + results_index, new_view, **kwargs)
# Call einsum
else:
# If out was specified
if handle_out:
kwargs["out"] = out
# Do the contraction
new_view = c_einsum(einsum_str, *tmp_operands, **kwargs)
# Append new items and dereference what we can
operands.append(new_view)
del tmp_operands, new_view
if specified_out:
return out
else:
return asanyarray(operands[0], order=output_order)
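# Illustrative sketch (not part of the original module): when the blas
# branch above fires for a contraction such as 'ij,jk->ki', the work is
# a tensordot over the removed index followed by a c_einsum that only
# reorders axes (x and y are hypothetical operands):
#
#   >>> t = tensordot(x, y, axes=((1,), (0,)))  # doctest: +SKIP
#   >>> out = c_einsum('ik->ki', t)             # doctest: +SKIP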
|
mxjl620/scikit-learn | refs/heads/master | examples/ensemble/plot_ensemble_oob.py | 259 | """
=============================
OOB Errors for Random Forests
=============================
The ``RandomForestClassifier`` is trained using *bootstrap aggregation*, where
each new tree is fit from a bootstrap sample of the training observations
:math:`z_i = (x_i, y_i)`. The *out-of-bag* (OOB) error is the average error for
each :math:`z_i` calculated using predictions from the trees that do not
contain :math:`z_i` in their respective bootstrap sample. This allows the
``RandomForestClassifier`` to be fit and validated whilst being trained [1].
The example below demonstrates how the OOB error can be measured at the
addition of each new tree during training. The resulting plot allows a
practitioner to approximate a suitable value of ``n_estimators`` at which the
error stabilizes.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", p592-593, Springer, 2009.
"""
import matplotlib.pyplot as plt
from collections import OrderedDict
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
# Author: Kian Ho <hui.kian.ho@gmail.com>
# Gilles Louppe <g.louppe@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 Clause
print(__doc__)
RANDOM_STATE = 123
# Generate a binary classification dataset.
X, y = make_classification(n_samples=500, n_features=25,
n_clusters_per_class=1, n_informative=15,
random_state=RANDOM_STATE)
# NOTE: Setting the `warm_start` construction parameter to `True` disables
# support for parallelised ensembles but is necessary for tracking the OOB
# error trajectory during training.
ensemble_clfs = [
("RandomForestClassifier, max_features='sqrt'",
RandomForestClassifier(warm_start=True, oob_score=True,
max_features="sqrt",
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features='log2'",
RandomForestClassifier(warm_start=True, max_features='log2',
oob_score=True,
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features=None",
RandomForestClassifier(warm_start=True, max_features=None,
oob_score=True,
random_state=RANDOM_STATE))
]
# Map a classifier name to a list of (<n_estimators>, <error rate>) pairs.
error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs)
# Range of `n_estimators` values to explore.
min_estimators = 15
max_estimators = 175
for label, clf in ensemble_clfs:
for i in range(min_estimators, max_estimators + 1):
clf.set_params(n_estimators=i)
clf.fit(X, y)
# Record the OOB error for each `n_estimators=i` setting.
oob_error = 1 - clf.oob_score_
error_rate[label].append((i, oob_error))
# Generate the "OOB error rate" vs. "n_estimators" plot.
for label, clf_err in error_rate.items():
xs, ys = zip(*clf_err)
plt.plot(xs, ys, label=label)
plt.xlim(min_estimators, max_estimators)
plt.xlabel("n_estimators")
plt.ylabel("OOB error rate")
plt.legend(loc="upper right")
plt.show()
|
nerdyLawman/officeHack | refs/heads/master | src/game/game_messages.py | 1 | from libtcod import libtcodpy as libtcod
import gameconfig
# GAME MESSAGES
DEFINED_MESSAGES = {
'WELCOME_MESSAGE' : ['Welcome to your NEW JOB.', libtcod.flame],
'LEVEL_REST_MESSAGE' : ['You take a moment to lay low.', libtcod.sky],
'LEVEL_CONTINUE_MESSAGE' : ['You decide to continue probing deeper.', libtcod.flame],
'WAIT_TURN_MESSAGE' : ['You WAIT a turn for the PARANOIA to close in on you.', libtcod.flame],
'FULL_INVENTORY' : ['Your INVENTORY is FULL! You can\'t PICK UP: ', libtcod.yellow],
'DROPPED_MESSAGE' : ['You DROPPED a ', libtcod.yellow],
}
PLAYER_LEVEL_UP = 'Your skills increase. LEVEL UP! Now at level: '
MENU_OVER = 'Cannot have a MENU with more than 26 OPTIONS!'
# MESSAGE BOXES
NO_LOAD_DATA = 'No saved gamedata to load.'
EMPTY_INVENTORY = 'INVENTORY is EMPTY.'
# TERMINALS MESSAGES
TERMINAL_WELCOME = 'Welcome to '
TERMINAL_TITLE = 'HAPPY TERMINAL V1.0 - 1993\nEnter a command to begin. Help for options.'
TERMINAL_START_MESSAGE = 'Enter a command to begin. Help for options.'
# MENU HEADERS
HELP_HEADER = 'PRESS the key next to any of the OPTIONS for more INFORMATION'
INVENTORY_HEADER = 'PRESS the KEY next to an ITEM to USE it, or ESC to CANCEL'
DROP_HEADER = 'PRESS the KEY next to an ITEM to DROP it'
LOOK_HEADER = 'SELECT an OBJECT in your FOV for more INFORMATION'
|
Arc-Team/android_kernel_htc_holiday | refs/heads/cm-12.0 | tools/perf/scripts/python/sctop.py | 11180 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n"
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
|
wooga/airflow | refs/heads/master | airflow/executors/celery_executor.py | 1 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""CeleryExecutor
.. seealso::
For more information on how the CeleryExecutor works, take a look at the guide:
:ref:`executor:CeleryExecutor`
"""
import logging
import math
import os
import subprocess
import time
import traceback
from multiprocessing import Pool, cpu_count
from typing import Any, List, Mapping, MutableMapping, Optional, Set, Tuple, Union
from celery import Celery, Task, states as celery_states
from celery.backends.base import BaseKeyValueStoreBackend
from celery.backends.database import DatabaseBackend, Task as TaskDb, session_cleanup
from celery.result import AsyncResult
from airflow.config_templates.default_celery import DEFAULT_CELERY_CONFIG
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.executors.base_executor import BaseExecutor, CommandType, EventBufferValueType
from airflow.models.taskinstance import SimpleTaskInstance, TaskInstanceKeyType
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.net import get_hostname
from airflow.utils.timeout import timeout
log = logging.getLogger(__name__)
# Make it constant for unit test.
CELERY_FETCH_ERR_MSG_HEADER = 'Error fetching Celery task state'
CELERY_SEND_ERR_MSG_HEADER = 'Error sending Celery task'
OPERATION_TIMEOUT = conf.getint('celery', 'operation_timeout', fallback=2)
'''
To start the celery worker, run the command:
airflow celery worker
'''
if conf.has_option('celery', 'celery_config_options'):
celery_configuration = conf.getimport('celery', 'celery_config_options')
else:
celery_configuration = DEFAULT_CELERY_CONFIG
app = Celery(
conf.get('celery', 'CELERY_APP_NAME'),
config_source=celery_configuration)
@app.task
def execute_command(command_to_exec: CommandType) -> None:
"""Executes command."""
if command_to_exec[0:3] != ["airflow", "tasks", "run"]:
raise ValueError('The command must start with ["airflow", "tasks", "run"].')
log.info("Executing command in Celery: %s", command_to_exec)
env = os.environ.copy()
try:
subprocess.check_call(command_to_exec, stderr=subprocess.STDOUT,
close_fds=True, env=env)
except subprocess.CalledProcessError as e:
log.exception('execute_command encountered a CalledProcessError')
log.error(e.output)
msg = 'Celery command failed on host: ' + get_hostname()
raise AirflowException(msg)
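# Illustrative sketch (not part of the original module): a well-formed
# payload for this task is the argv-style command list that the executor
# queues, e.g. (dag/task/date values here are hypothetical):
#
#   execute_command.apply_async(
#       args=[["airflow", "tasks", "run", "my_dag", "my_task", "2020-01-01"]])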
class ExceptionWithTraceback:
"""
Wrapper class used to propagate exceptions to parent processes from subprocesses.
:param exception: The exception to wrap
:type exception: Exception
:param exception_traceback: The stacktrace to wrap
:type exception_traceback: str
"""
def __init__(self, exception: Exception, exception_traceback: str):
self.exception = exception
self.traceback = exception_traceback
# Task instance that is sent over Celery queues
# TaskInstanceKeyType, SimpleTaskInstance, Command, queue_name, CallableTask
TaskInstanceInCelery = Tuple[TaskInstanceKeyType, SimpleTaskInstance, CommandType, Optional[str], Task]
def send_task_to_executor(task_tuple: TaskInstanceInCelery) \
-> Tuple[TaskInstanceKeyType, CommandType, Union[AsyncResult, ExceptionWithTraceback]]:
"""Sends task to executor."""
key, _, command, queue, task_to_run = task_tuple
try:
with timeout(seconds=OPERATION_TIMEOUT):
result = task_to_run.apply_async(args=[command], queue=queue)
except Exception as e: # pylint: disable=broad-except
exception_traceback = "Celery Task ID: {}\n{}".format(key, traceback.format_exc())
result = ExceptionWithTraceback(e, exception_traceback)
return key, command, result
class CeleryExecutor(BaseExecutor):
"""
CeleryExecutor is recommended for production use of Airflow. It allows
distributing the execution of task instances to multiple worker nodes.
Celery is a simple, flexible and reliable distributed system to process
vast amounts of messages, while providing operations with the tools
required to maintain such a system.
"""
def __init__(self):
super().__init__()
# Celery doesn't support bulk sending the tasks (which can become a bottleneck on bigger clusters)
# so we use a multiprocessing pool to speed this up.
# How many worker processes are created for checking celery task state.
self._sync_parallelism = conf.getint('celery', 'SYNC_PARALLELISM')
if self._sync_parallelism == 0:
self._sync_parallelism = max(1, cpu_count() - 1)
self.bulk_state_fetcher = BulkStateFetcher(self._sync_parallelism)
self.tasks = {}
self.last_state = {}
def start(self) -> None:
self.log.debug(
'Starting Celery Executor using %s processes for syncing',
self._sync_parallelism
)
def _num_tasks_per_send_process(self, to_send_count: int) -> int:
"""
How many Celery tasks should each worker process send.
:return: Number of tasks that should be sent per process
:rtype: int
"""
return max(1,
int(math.ceil(1.0 * to_send_count / self._sync_parallelism)))
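# Illustrative sketch (not part of the original module): with 100 tasks
# to send and a sync parallelism of 8, each worker process is handed
# max(1, ceil(100 / 8)) == 13 tasks.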
def trigger_tasks(self, open_slots: int) -> None:
"""
Overwrite trigger_tasks function from BaseExecutor
:param open_slots: Number of open slots
:return:
"""
sorted_queue = self.order_queued_tasks_by_priority()
task_tuples_to_send: List[TaskInstanceInCelery] = []
for _ in range(min(open_slots, len(self.queued_tasks))):
key, (command, _, queue, simple_ti) = sorted_queue.pop(0)
task_tuples_to_send.append((key, simple_ti, command, queue, execute_command))
if task_tuples_to_send:
first_task = next(t[4] for t in task_tuples_to_send)
# Celery state queries will get stuck if we do not use the same backend
# for all tasks.
cached_celery_backend = first_task.backend
key_and_async_results = self._send_tasks_to_celery(task_tuples_to_send)
self.log.debug('Sent all tasks.')
for key, command, result in key_and_async_results:
if isinstance(result, ExceptionWithTraceback):
self.log.error( # pylint: disable=logging-not-lazy
CELERY_SEND_ERR_MSG_HEADER + ":%s\n%s\n", result.exception, result.traceback
)
elif result is not None:
# Only pops when enqueued successfully, otherwise keep it
# and expect scheduler loop to deal with it.
self.queued_tasks.pop(key)
result.backend = cached_celery_backend
self.running.add(key)
self.tasks[key] = result
self.last_state[key] = celery_states.PENDING
def _send_tasks_to_celery(self, task_tuples_to_send):
if len(task_tuples_to_send) == 1:
# One tuple, so send it in the main thread.
return [
send_task_to_executor(task_tuples_to_send[0])
]
# Use chunks instead of a work queue to reduce context switching
# since tasks are roughly uniform in size
chunksize = self._num_tasks_per_send_process(len(task_tuples_to_send))
num_processes = min(len(task_tuples_to_send), self._sync_parallelism)
with Pool(processes=num_processes) as send_pool:
key_and_async_results = send_pool.map(
send_task_to_executor,
task_tuples_to_send,
chunksize=chunksize)
return key_and_async_results
def sync(self) -> None:
if not self.tasks:
self.log.debug("No task to query celery, skipping sync")
return
self.update_all_task_states()
def update_all_task_states(self) -> None:
"""Updates states of the tasks."""
self.log.debug("Inquiring about %s celery task(s)", len(self.tasks))
state_and_info_by_celery_task_id = self.bulk_state_fetcher.get_many(self.tasks.values())
self.log.debug("Inquiries completed.")
for key, async_result in list(self.tasks.items()):
state, info = state_and_info_by_celery_task_id.get(async_result.task_id)
if state:
self.update_task_state(key, state, info)
def update_task_state(self, key: TaskInstanceKeyType, state: str, info: Any) -> None:
"""Updates state of a single task."""
# noinspection PyBroadException
try:
if self.last_state[key] != state:
if state == celery_states.SUCCESS:
self.success(key, info)
del self.tasks[key]
del self.last_state[key]
elif state == celery_states.FAILURE:
self.fail(key, info)
del self.tasks[key]
del self.last_state[key]
elif state == celery_states.REVOKED:
self.fail(key, info)
del self.tasks[key]
del self.last_state[key]
else:
self.log.info("Unexpected state: %s", state)
self.last_state[key] = state
except Exception: # pylint: disable=broad-except
self.log.exception("Error syncing the Celery executor, ignoring it.")
def end(self, synchronous: bool = False) -> None:
if synchronous:
while any(task.state not in celery_states.READY_STATES for task in self.tasks.values()):
time.sleep(5)
self.sync()
def execute_async(self,
key: TaskInstanceKeyType,
command: CommandType,
queue: Optional[str] = None,
executor_config: Optional[Any] = None):
"""Do not allow async execution for Celery executor."""
raise AirflowException("No Async execution for Celery executor.")
def terminate(self):
pass
def fetch_celery_task_state(async_result: AsyncResult) -> \
Tuple[str, Union[str, ExceptionWithTraceback], Any]:
"""
Fetch and return the state of the given celery task. The scope of this function is
global so that it can be called by subprocesses in the pool.
:param async_result: a tuple of the Celery task key and the async Celery object used
to fetch the task's state
:type async_result: tuple(str, celery.result.AsyncResult)
:return: a tuple of the Celery task key and the Celery state and the celery info
of the task
:rtype: tuple[str, str, str]
"""
try:
with timeout(seconds=OPERATION_TIMEOUT):
# Accessing state property of celery task will make actual network request
# to get the current state of the task
info = async_result.info if hasattr(async_result, 'info') else None
return async_result.task_id, async_result.state, info
except Exception as e: # pylint: disable=broad-except
exception_traceback = f"Celery Task ID: {async_result}\n{traceback.format_exc()}"
return async_result.task_id, ExceptionWithTraceback(e, exception_traceback), None
def _tasks_list_to_task_ids(async_tasks) -> Set[str]:
return {a.task_id for a in async_tasks}
class BulkStateFetcher(LoggingMixin):
"""
Gets status for many Celery tasks using the best method available
If BaseKeyValueStoreBackend is used as result backend, the mget method is used.
If DatabaseBackend is used as result backend, the SELECT ... WHERE task_id IN (...) query is used
Otherwise, multiprocessing.Pool will be used. Each task status will be downloaded individually.
"""
def __init__(self, sync_parallelism=None):
super().__init__()
self._sync_parallelism = sync_parallelism
def get_many(self, async_results) -> Mapping[str, EventBufferValueType]:
"""
Gets status for many Celery tasks using the best method available.
"""
if isinstance(app.backend, BaseKeyValueStoreBackend):
result = self._get_many_from_kv_backend(async_results)
return result
if isinstance(app.backend, DatabaseBackend):
result = self._get_many_from_db_backend(async_results)
return result
result = self._get_many_using_multiprocessing(async_results)
self.log.debug("Fetched %d states for %d task", len(result), len(async_results))
return result
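# Illustrative usage sketch (not part of the original module):
#
#   fetcher = BulkStateFetcher(sync_parallelism=4)
#   states = fetcher.get_many(executor.tasks.values())
#   # -> {'<celery-task-id>': (state, info), ...}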
def _get_many_from_kv_backend(self, async_tasks) -> Mapping[str, EventBufferValueType]:
task_ids = _tasks_list_to_task_ids(async_tasks)
keys = [app.backend.get_key_for_task(k) for k in task_ids]
values = app.backend.mget(keys)
task_results = [app.backend.decode_result(v) for v in values if v]
task_results_by_task_id = {task_result["task_id"]: task_result for task_result in task_results}
return self._prepare_state_and_info_by_task_dict(task_ids, task_results_by_task_id)
def _get_many_from_db_backend(self, async_tasks) -> Mapping[str, EventBufferValueType]:
task_ids = _tasks_list_to_task_ids(async_tasks)
session = app.backend.ResultSession()
with session_cleanup(session):
tasks = session.query(TaskDb).filter(TaskDb.task_id.in_(task_ids)).all()
task_results = [app.backend.meta_from_decoded(task.to_dict()) for task in tasks]
task_results_by_task_id = {task_result["task_id"]: task_result for task_result in task_results}
return self._prepare_state_and_info_by_task_dict(task_ids, task_results_by_task_id)
@staticmethod
def _prepare_state_and_info_by_task_dict(task_ids,
task_results_by_task_id) -> Mapping[str, EventBufferValueType]:
state_info: MutableMapping[str, EventBufferValueType] = {}
for task_id in task_ids:
task_result = task_results_by_task_id.get(task_id)
if task_result:
state = task_result["status"]
info = None if not hasattr(task_result, "info") else task_result["info"]
else:
state = celery_states.PENDING
info = None
state_info[task_id] = state, info
return state_info
def _get_many_using_multiprocessing(self, async_results) -> Mapping[str, EventBufferValueType]:
num_process = min(len(async_results), self._sync_parallelism)
with Pool(processes=num_process) as sync_pool:
chunksize = max(1, math.ceil(1.0 * len(async_results) / self._sync_parallelism))
task_id_to_states_and_info = sync_pool.map(
fetch_celery_task_state,
async_results,
chunksize=chunksize)
states_and_info_by_task_id: MutableMapping[str, EventBufferValueType] = {}
for task_id, state_or_exception, info in task_id_to_states_and_info:
if isinstance(state_or_exception, ExceptionWithTraceback):
self.log.error( # pylint: disable=logging-not-lazy
CELERY_FETCH_ERR_MSG_HEADER + ":%s\n%s\n",
state_or_exception.exception, state_or_exception.traceback
)
else:
states_and_info_by_task_id[task_id] = state_or_exception, info
return states_and_info_by_task_id
|
krunal3103/servo | refs/heads/master | tests/wpt/css-tests/css-text-decor-3_dev/xhtml1/support/generate-text-emphasis-line-height-tests.py | 829 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
This script generates tests text-emphasis-line-height-001 ~ 004 except
001z. They test the line height expansion in different directions. This
script outputs a list of all tests it generated in the format of Mozilla
reftest.list to the stdout.
"""
from __future__ import unicode_literals
TEST_FILE = 'text-emphasis-line-height-{:03}{}.html'
TEST_TEMPLATE = '''<!DOCTYPE html>
<meta charset="utf-8">
<title>CSS Test: text-emphasis line height, {pos}, {wm}, {tag}</title>
<link rel="author" title="Xidorn Quan" href="https://www.upsuper.org">
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
<link rel="help" href="https://drafts.csswg.org/css-text-decor-3/#text-emphasis-position-property">
<meta name="assert" content="text emphasis marks should expand the line height like ruby if necessary">
<link rel="match" href="text-emphasis-line-height-{index:03}-ref.html">
<p>Pass if the emphasis marks are {dir} the black line:</p>
{start}試験テスト{end}
'''
REF_FILE = 'text-emphasis-line-height-{:03}-ref.html'
REF_TEMPLATE='''<!DOCTYPE html>
<meta charset="utf-8">
<title>CSS Reference: text-emphasis line height, {pos}</title>
<link rel="author" title="Xidorn Quan" href="https://www.upsuper.org">
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
<style> rt {{ font-variant-east-asian: inherit; }} </style>
<p>Pass if the emphasis marks are {dir} the black line:</p>
<div style="line-height: 1; border-{pos}: 1px solid black; writing-mode: {wm}; ruby-position: {posval}"><ruby>試<rt>●</rt>験<rt>●</rt>テ<rt>●</rt>ス<rt>●</rt>ト<rt>●</rt></ruby></div>
'''
STYLE1 = 'line-height: 1; border-{pos}: 1px solid black; ' + \
'writing-mode: {wm}; text-emphasis-position: {posval};'
STYLE2 = 'text-emphasis: circle;'
TAGS = [
# (tag, start, end)
('div', '<div style="{style1}{style2}">', '</div>'),
('span', '<div style="{style1}"><span style="{style2}">', '</span></div>'),
]
POSITIONS = [
# pos, text-emphasis-position, ruby-position,
# writing-modes, dir text
('top', 'over right', 'over',
['horizontal-tb'], 'below'),
('bottom', 'under right', 'under',
['horizontal-tb'], 'over'),
('right', 'over right', 'over',
['vertical-rl', 'vertical-lr'], 'to the left of'),
('left', 'over left', 'under',
['vertical-rl', 'vertical-lr'], 'to the right of'),
]
import string
def write_file(filename, content):
with open(filename, 'wb') as f:
f.write(content.encode('UTF-8'))
print("# START tests from {}".format(__file__))
idx = 0
for (pos, emphasis_pos, ruby_pos, wms, dir) in POSITIONS:
idx += 1
ref_file = REF_FILE.format(idx)
content = REF_TEMPLATE.format(pos=pos, dir=dir, wm=wms[0], posval=ruby_pos)
write_file(ref_file, content)
suffix = iter(string.ascii_lowercase)
for wm in wms:
style1 = STYLE1.format(pos=pos, wm=wm, posval=emphasis_pos)
for (tag, start, end) in TAGS:
test_file = TEST_FILE.format(idx, next(suffix))
content = TEST_TEMPLATE.format(
pos=pos, wm=wm, tag=tag, index=idx, dir=dir,
start=start.format(style1=style1, style2=STYLE2), end=end)
write_file(test_file, content)
print("== {} {}".format(test_file, ref_file))
print("# END tests from {}".format(__file__))
|
ntuecon/server | refs/heads/master | pyenv/Lib/site-packages/win32com/test/testDictionary.py | 4 | # testDictionary.py
#
import sys
import win32com.server.util
import win32com.test.util
import win32com.client
import traceback
import pythoncom
import pywintypes
import winerror
import unittest
error = "dictionary test error"
def MakeTestDictionary():
return win32com.client.Dispatch("Python.Dictionary")
def TestDictAgainst(dict,check):
for key, value in check.iteritems():
if dict(key) != value:
raise error("Indexing for '%s' gave the incorrect value - %s/%s" % (repr(key), repr(dict[key]), repr(check[key])))
# Ensure we have the correct version registered.
def Register(quiet):
import win32com.servers.dictionary
from win32com.test.util import RegisterPythonServer
RegisterPythonServer(win32com.servers.dictionary.__file__, 'Python.Dictionary')
def TestDict(quiet=None):
if quiet is None:
quiet = not "-v" in sys.argv
Register(quiet)
if not quiet: print "Simple enum test"
dict = MakeTestDictionary()
checkDict = {}
TestDictAgainst(dict, checkDict)
dict["NewKey"] = "NewValue"
checkDict["NewKey"] = "NewValue"
TestDictAgainst(dict, checkDict)
dict["NewKey"] = None
del checkDict["NewKey"]
TestDictAgainst(dict, checkDict)
if not quiet:
print "Failure tests"
try:
dict()
raise error("default method with no args worked when it shouldnt have!")
except pythoncom.com_error, (hr, desc, exc, argErr):
if hr != winerror.DISP_E_BADPARAMCOUNT:
raise error("Expected DISP_E_BADPARAMCOUNT - got %d (%s)" % (hr, desc))
try:
dict("hi", "there")
raise error("multiple args worked when it shouldnt have!")
except pythoncom.com_error, (hr, desc, exc, argErr):
if hr != winerror.DISP_E_BADPARAMCOUNT:
raise error("Expected DISP_E_BADPARAMCOUNT - got %d (%s)" % (hr, desc))
try:
dict(0)
raise error("int key worked when it shouldnt have!")
except pythoncom.com_error, (hr, desc, exc, argErr):
if hr != winerror.DISP_E_TYPEMISMATCH:
raise error("Expected DISP_E_TYPEMISMATCH - got %d (%s)" % (hr, desc))
if not quiet:
print "Python.Dictionary tests complete."
class TestCase(win32com.test.util.TestCase):
def testDict(self):
TestDict()
if __name__=='__main__':
unittest.main()
|
Danath/gyp | refs/heads/master | test/lib/TestCommon.py | 307 | """
TestCommon.py: a testing framework for commands and scripts
with commonly useful error handling
The TestCommon module provides a simple, high-level interface for writing
tests of executable commands and scripts, especially commands and scripts
that interact with the file system. All methods throw exceptions and
exit on failure, with useful error messages. This makes a number of
explicit checks unnecessary, making the test scripts themselves simpler
to write and easier to read.
The TestCommon class is a subclass of the TestCmd class. In essence,
TestCommon is a wrapper that handles common TestCmd error conditions in
useful ways. You can use TestCommon directly, or subclass it for your
program and add additional (or override) methods to tailor it to your
program's specific needs. Alternatively, the TestCommon class serves
as a useful example of how to define your own TestCmd subclass.
As a subclass of TestCmd, TestCommon provides access to all of the
variables and methods from the TestCmd module. Consequently, you can
use any variable or method documented in the TestCmd module without
having to explicitly import TestCmd.
A TestCommon environment object is created via the usual invocation:
import TestCommon
test = TestCommon.TestCommon()
You can use all of the TestCmd keyword arguments when instantiating a
TestCommon object; see the TestCmd documentation for details.
Here is an overview of the methods and keyword arguments that are
provided by the TestCommon class:
test.must_be_writable('file1', ['file2', ...])
test.must_contain('file', 'required text\n')
test.must_contain_all_lines(output, lines, ['title', find])
test.must_contain_any_line(output, lines, ['title', find])
test.must_exist('file1', ['file2', ...])
test.must_match('file', "expected contents\n")
test.must_not_be_writable('file1', ['file2', ...])
test.must_not_contain('file', 'banned text\n')
test.must_not_contain_any_line(output, lines, ['title', find])
test.must_not_exist('file1', ['file2', ...])
test.run(options = "options to be prepended to arguments",
stdout = "expected standard output from the program",
stderr = "expected error output from the program",
status = expected_status,
match = match_function)
The TestCommon module also provides the following variables
TestCommon.python_executable
TestCommon.exe_suffix
TestCommon.obj_suffix
TestCommon.shobj_prefix
TestCommon.shobj_suffix
TestCommon.lib_prefix
TestCommon.lib_suffix
TestCommon.dll_prefix
TestCommon.dll_suffix
"""
# Copyright 2000-2010 Steven Knight
# This module is free software, and you may redistribute it and/or modify
# it under the same terms as Python itself, so long as this copyright message
# and disclaimer are retained in their original form.
#
# IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
# SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
# THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#
# THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
# AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
# SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
__author__ = "Steven Knight <knight at baldmt dot com>"
__revision__ = "TestCommon.py 0.37.D001 2010/01/11 16:55:50 knight"
__version__ = "0.37"
import copy
import os
import os.path
import stat
import string
import sys
import types
import UserList
from TestCmd import *
from TestCmd import __all__
__all__.extend([ 'TestCommon',
'exe_suffix',
'obj_suffix',
'shobj_prefix',
'shobj_suffix',
'lib_prefix',
'lib_suffix',
'dll_prefix',
'dll_suffix',
])
# Variables that describe the prefixes and suffixes on this system.
if sys.platform == 'win32':
exe_suffix = '.exe'
obj_suffix = '.obj'
shobj_suffix = '.obj'
shobj_prefix = ''
lib_prefix = ''
lib_suffix = '.lib'
dll_prefix = ''
dll_suffix = '.dll'
elif sys.platform == 'cygwin':
exe_suffix = '.exe'
obj_suffix = '.o'
shobj_suffix = '.os'
shobj_prefix = ''
lib_prefix = 'lib'
lib_suffix = '.a'
dll_prefix = ''
dll_suffix = '.dll'
elif string.find(sys.platform, 'irix') != -1:
exe_suffix = ''
obj_suffix = '.o'
shobj_suffix = '.o'
shobj_prefix = ''
lib_prefix = 'lib'
lib_suffix = '.a'
dll_prefix = 'lib'
dll_suffix = '.so'
elif string.find(sys.platform, 'darwin') != -1:
exe_suffix = ''
obj_suffix = '.o'
shobj_suffix = '.os'
shobj_prefix = ''
lib_prefix = 'lib'
lib_suffix = '.a'
dll_prefix = 'lib'
dll_suffix = '.dylib'
elif string.find(sys.platform, 'sunos') != -1:
exe_suffix = ''
obj_suffix = '.o'
shobj_suffix = '.os'
shobj_prefix = 'so_'
lib_prefix = 'lib'
lib_suffix = '.a'
dll_prefix = 'lib'
    dll_suffix = '.so'   # SunOS uses ELF .so shared objects, not Mach-O .dylib
else:
exe_suffix = ''
obj_suffix = '.o'
shobj_suffix = '.os'
shobj_prefix = ''
lib_prefix = 'lib'
lib_suffix = '.a'
dll_prefix = 'lib'
dll_suffix = '.so'
def is_List(e):
return type(e) is types.ListType \
or isinstance(e, UserList.UserList)
def is_writable(f):
mode = os.stat(f)[stat.ST_MODE]
return mode & stat.S_IWUSR
def separate_files(flist):
existing = []
missing = []
for f in flist:
if os.path.exists(f):
existing.append(f)
else:
missing.append(f)
return existing, missing
def _failed(self, status = 0):
if self.status is None or status is None:
return None
try:
return _status(self) not in status
except TypeError:
# status wasn't an iterable
return _status(self) != status
def _status(self):
return self.status
class TestCommon(TestCmd):
# Additional methods from the Perl Test::Cmd::Common module
# that we may wish to add in the future:
#
# $test->subdir('subdir', ...);
#
# $test->copy('src_file', 'dst_file');
def __init__(self, **kw):
"""Initialize a new TestCommon instance. This involves just
calling the base class initialization, and then changing directory
to the workdir.
"""
apply(TestCmd.__init__, [self], kw)
os.chdir(self.workdir)
def must_be_writable(self, *files):
"""Ensures that the specified file(s) exist and are writable.
An individual file can be specified as a list of directory names,
in which case the pathname will be constructed by concatenating
them. Exits FAILED if any of the files does not exist or is
not writable.
"""
files = map(lambda x: is_List(x) and apply(os.path.join, x) or x, files)
existing, missing = separate_files(files)
unwritable = filter(lambda x, iw=is_writable: not iw(x), existing)
if missing:
print "Missing files: `%s'" % string.join(missing, "', `")
if unwritable:
print "Unwritable files: `%s'" % string.join(unwritable, "', `")
self.fail_test(missing + unwritable)
def must_contain(self, file, required, mode = 'rb'):
"""Ensures that the specified file contains the required text.
"""
file_contents = self.read(file, mode)
contains = (string.find(file_contents, required) != -1)
if not contains:
print "File `%s' does not contain required string." % file
print self.banner('Required string ')
print required
print self.banner('%s contents ' % file)
print file_contents
self.fail_test(not contains)
def must_contain_all_lines(self, output, lines, title=None, find=None):
"""Ensures that the specified output string (first argument)
contains all of the specified lines (second argument).
An optional third argument can be used to describe the type
of output being searched, and only shows up in failure output.
An optional fourth argument can be used to supply a different
        function, of the form "find(output, line)", to use when searching
for lines in the output.
"""
if find is None:
find = lambda o, l: string.find(o, l) != -1
missing = []
for line in lines:
if not find(output, line):
missing.append(line)
if missing:
if title is None:
title = 'output'
sys.stdout.write("Missing expected lines from %s:\n" % title)
for line in missing:
sys.stdout.write(' ' + repr(line) + '\n')
sys.stdout.write(self.banner(title + ' '))
sys.stdout.write(output)
self.fail_test()
def must_contain_any_line(self, output, lines, title=None, find=None):
"""Ensures that the specified output string (first argument)
contains at least one of the specified lines (second argument).
An optional third argument can be used to describe the type
of output being searched, and only shows up in failure output.
An optional fourth argument can be used to supply a different
        function, of the form "find(output, line)", to use when searching
for lines in the output.
"""
if find is None:
find = lambda o, l: string.find(o, l) != -1
for line in lines:
if find(output, line):
return
if title is None:
title = 'output'
sys.stdout.write("Missing any expected line from %s:\n" % title)
for line in lines:
sys.stdout.write(' ' + repr(line) + '\n')
sys.stdout.write(self.banner(title + ' '))
sys.stdout.write(output)
self.fail_test()
def must_contain_lines(self, lines, output, title=None):
# Deprecated; retain for backwards compatibility.
return self.must_contain_all_lines(output, lines, title)
def must_exist(self, *files):
"""Ensures that the specified file(s) must exist. An individual
        file can be specified as a list of directory names, in which case the
pathname will be constructed by concatenating them. Exits FAILED
if any of the files does not exist.
"""
files = map(lambda x: is_List(x) and apply(os.path.join, x) or x, files)
missing = filter(lambda x: not os.path.exists(x), files)
if missing:
print "Missing files: `%s'" % string.join(missing, "', `")
self.fail_test(missing)
def must_match(self, file, expect, mode = 'rb'):
"""Matches the contents of the specified file (first argument)
against the expected contents (second argument). The expected
contents are a list of lines or a string which will be split
on newlines.
"""
file_contents = self.read(file, mode)
try:
self.fail_test(not self.match(file_contents, expect))
except KeyboardInterrupt:
raise
except:
print "Unexpected contents of `%s'" % file
self.diff(expect, file_contents, 'contents ')
raise
def must_not_contain(self, file, banned, mode = 'rb'):
"""Ensures that the specified file doesn't contain the banned text.
"""
file_contents = self.read(file, mode)
contains = (string.find(file_contents, banned) != -1)
if contains:
print "File `%s' contains banned string." % file
print self.banner('Banned string ')
print banned
print self.banner('%s contents ' % file)
print file_contents
self.fail_test(contains)
def must_not_contain_any_line(self, output, lines, title=None, find=None):
"""Ensures that the specified output string (first argument)
does not contain any of the specified lines (second argument).
An optional third argument can be used to describe the type
of output being searched, and only shows up in failure output.
An optional fourth argument can be used to supply a different
        function, of the form "find(output, line)", to use when searching
for lines in the output.
"""
if find is None:
find = lambda o, l: string.find(o, l) != -1
unexpected = []
for line in lines:
if find(output, line):
unexpected.append(line)
if unexpected:
if title is None:
title = 'output'
sys.stdout.write("Unexpected lines in %s:\n" % title)
for line in unexpected:
sys.stdout.write(' ' + repr(line) + '\n')
sys.stdout.write(self.banner(title + ' '))
sys.stdout.write(output)
self.fail_test()
def must_not_contain_lines(self, lines, output, title=None):
return self.must_not_contain_any_line(output, lines, title)
def must_not_exist(self, *files):
"""Ensures that the specified file(s) must not exist.
        An individual file can be specified as a list of directory names, in
which case the pathname will be constructed by concatenating them.
Exits FAILED if any of the files exists.
"""
files = map(lambda x: is_List(x) and apply(os.path.join, x) or x, files)
existing = filter(os.path.exists, files)
if existing:
print "Unexpected files exist: `%s'" % string.join(existing, "', `")
self.fail_test(existing)
def must_not_be_writable(self, *files):
"""Ensures that the specified file(s) exist and are not writable.
An individual file can be specified as a list of directory names,
in which case the pathname will be constructed by concatenating
them. Exits FAILED if any of the files does not exist or is
writable.
"""
files = map(lambda x: is_List(x) and apply(os.path.join, x) or x, files)
existing, missing = separate_files(files)
writable = filter(is_writable, existing)
if missing:
print "Missing files: `%s'" % string.join(missing, "', `")
if writable:
print "Writable files: `%s'" % string.join(writable, "', `")
self.fail_test(missing + writable)
def _complete(self, actual_stdout, expected_stdout,
actual_stderr, expected_stderr, status, match):
"""
Post-processes running a subcommand, checking for failure
status and displaying output appropriately.
"""
if _failed(self, status):
expect = ''
if status != 0:
expect = " (expected %s)" % str(status)
print "%s returned %s%s" % (self.program, str(_status(self)), expect)
print self.banner('STDOUT ')
print actual_stdout
print self.banner('STDERR ')
print actual_stderr
self.fail_test()
if not expected_stdout is None and not match(actual_stdout, expected_stdout):
self.diff(expected_stdout, actual_stdout, 'STDOUT ')
if actual_stderr:
print self.banner('STDERR ')
print actual_stderr
self.fail_test()
if not expected_stderr is None and not match(actual_stderr, expected_stderr):
print self.banner('STDOUT ')
print actual_stdout
self.diff(expected_stderr, actual_stderr, 'STDERR ')
self.fail_test()
def start(self, program = None,
interpreter = None,
arguments = None,
universal_newlines = None,
**kw):
"""
Starts a program or script for the test environment.
This handles the "options" keyword argument and exceptions.
"""
options = kw.pop('options', None)
if options:
if arguments is None:
arguments = options
else:
arguments = options + " " + arguments
try:
return apply(TestCmd.start,
(self, program, interpreter, arguments, universal_newlines),
kw)
except KeyboardInterrupt:
raise
except Exception, e:
print self.banner('STDOUT ')
try:
print self.stdout()
except IndexError:
pass
print self.banner('STDERR ')
try:
print self.stderr()
except IndexError:
pass
cmd_args = self.command_args(program, interpreter, arguments)
sys.stderr.write('Exception trying to execute: %s\n' % cmd_args)
raise e
def finish(self, popen, stdout = None, stderr = '', status = 0, **kw):
"""
Finishes and waits for the process being run under control of
the specified popen argument. Additional arguments are similar
to those of the run() method:
stdout The expected standard output from
the command. A value of None means
don't test standard output.
stderr The expected error output from
the command. A value of None means
don't test error output.
status The expected exit status from the
command. A value of None means don't
test exit status.
"""
apply(TestCmd.finish, (self, popen,), kw)
match = kw.get('match', self.match)
self._complete(self.stdout(), stdout,
self.stderr(), stderr, status, match)
def run(self, options = None, arguments = None,
stdout = None, stderr = '', status = 0, **kw):
"""Runs the program under test, checking that the test succeeded.
The arguments are the same as the base TestCmd.run() method,
with the addition of:
        options Extra options that get prepended to the arguments.
stdout The expected standard output from
the command. A value of None means
don't test standard output.
stderr The expected error output from
the command. A value of None means
don't test error output.
status The expected exit status from the
command. A value of None means don't
test exit status.
By default, this expects a successful exit (status = 0), does
not test standard output (stdout = None), and expects that error
output is empty (stderr = "").
"""
if options:
if arguments is None:
arguments = options
else:
arguments = options + " " + arguments
kw['arguments'] = arguments
match = kw.pop('match', self.match)
apply(TestCmd.run, [self], kw)
self._complete(self.stdout(), stdout,
self.stderr(), stderr, status, match)
def skip_test(self, message="Skipping test.\n"):
"""Skips a test.
Proper test-skipping behavior is dependent on the external
TESTCOMMON_PASS_SKIPS environment variable. If set, we treat
the skip as a PASS (exit 0), and otherwise treat it as NO RESULT.
In either case, we print the specified message as an indication
that the substance of the test was skipped.
(This was originally added to support development under Aegis.
Technically, skipping a test is a NO RESULT, but Aegis would
treat that as a test failure and prevent the change from going to
        the next step.  Since we didn't want to force anyone using Aegis
to have to install absolutely every tool used by the tests, we
would actually report to Aegis that a skipped test has PASSED
so that the workflow isn't held up.)
"""
if message:
sys.stdout.write(message)
sys.stdout.flush()
pass_skips = os.environ.get('TESTCOMMON_PASS_SKIPS')
if pass_skips in [None, 0, '0']:
# skip=1 means skip this function when showing where this
# result came from. They only care about the line where the
# script called test.skip_test(), not the line number where
# we call test.no_result().
self.no_result(skip=1)
else:
# We're under the development directory for this change,
# so this is an Aegis invocation; pass the test (exit 0).
self.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
jyap808/jumbucks | refs/heads/master | share/qt/extract_strings_qt.py | 1294 | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
OUT_CPP="src/qt/bitcoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
files = glob.glob('src/*.cpp') + glob.glob('src/*.h')
# xgettext -n --keyword=_ $FILES
child = Popen(['xgettext','--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *bitcoin_strings[] = {')
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("bitcoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};')
f.close()
|
takaakiaoki/PyFoam | refs/heads/master | examples/TestRunner/runIcoFoamCavityParallel.py | 3 | #! /usr/bin/env python
from runIcoFoamCavity import PlainIcoFoamCavity
from PyFoam.Applications.Decomposer import Decomposer
class ParallelIcoFoamCavity(PlainIcoFoamCavity):
def init(self):
self.setParameters(parallel=True,
autoDecompose=False,
nrCpus=2)
def decompose(self):
Decomposer(args=[self.caseDir,
str(self["nrCpus"]),
"--method=simple",
"--n=(2,1,1)",
"--delta=1e-5"])
if __name__=='__main__':
ParallelIcoFoamCavity().run()
|
spthaolt/socorro | refs/heads/master | socorro/webapi/servers.py | 8 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import web
import os
from socorro.webapi.classPartial import classWithPartialInit
from configman import Namespace, RequiredConfig
#==============================================================================
class WebServerBase(RequiredConfig):
required_config = Namespace()
#--------------------------------------------------------------------------
def __init__(self, config, services_list):
self.config = config
urls = []
for each in services_list:
if hasattr(each, 'uri'):
# this is the old middleware and dataservice
uri, cls = each.uri, each
else:
# this is middleware_app (soon to be deprecated)
uri, cls = each
if isinstance(uri, basestring):
uri = (uri, )
for a_uri in uri:
urls.append(a_uri)
urls.append(classWithPartialInit(cls, config))
self.urls = tuple(urls)
web.webapi.internalerror = web.debugerror
web.config.debug = False
self._identify()
self._wsgi_func = web.application(self.urls, globals()).wsgifunc()
#--------------------------------------------------------------------------
def run(self):
        raise NotImplementedError
#--------------------------------------------------------------------------
def _identify(self):
pass
#==============================================================================
class WSGIServer(WebServerBase):
"""When running under a wsgi compatible Web server, modwsgi requires a
reference to a "wsgifunc" In this varient of the WebServer class, the run
function returns the result of the webpy framework's wsgifunc.
Applications that use this class must provide a module level variable
'application' in the module given to the Web server modwsgi configuration.
The value of the variable must be the _wsgi_func.
"""
#--------------------------------------------------------------------------
def run(self):
return self._wsgi_func
#--------------------------------------------------------------------------
def _identify(self):
self.config.logger.info('this is WSGIServer')
#--------------------------------------------------------------------------
@staticmethod
def get_socorro_config_path(wsgi_file):
wsgi_path = os.path.dirname(os.path.realpath(wsgi_file))
config_path = os.path.join(wsgi_path, '..', 'config')
return os.path.abspath(config_path)
ApacheModWSGI = WSGIServer  # for backwards compatibility
#==============================================================================
class StandAloneServer(WebServerBase):
required_config = Namespace()
required_config.add_option(
'port',
doc='the port to listen to for submissions',
default=8882
)
#==============================================================================
class CherryPy(StandAloneServer):
required_config = Namespace()
required_config.add_option(
'ip_address',
doc='the IP address from which to accept submissions',
default='127.0.0.1'
)
#--------------------------------------------------------------------------
def run(self):
web.runsimple(
self._wsgi_func,
(self.config.web_server.ip_address, self.config.web_server.port)
)
#--------------------------------------------------------------------------
def _identify(self):
self.config.logger.info(
'this is CherryPy from web.py running standalone at %s:%d',
self.config.web_server.ip_address,
self.config.web_server.port
)
|
bastianh/swampdragon | refs/heads/master | swampdragon/serializers/serializer_tools.py | 9 | from collections import namedtuple
from django.db.models.fields.related import ForeignKey, ReverseSingleRelatedObjectDescriptor, \
ManyRelatedObjectsDescriptor, ReverseManyRelatedObjectsDescriptor, ForeignRelatedObjectsDescriptor, \
SingleRelatedObjectDescriptor
# from django.db.models.related import RelatedObject
from django.db.models.fields.related import ForeignObjectRel
from django.db.models.fields.related import ManyToManyField
class FieldType(namedtuple('FieldType', 'field, model, fk, m2m')):
'''
Determine if a field is an m2m, reverse m2m, fk or reverse fk
'''
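    # The four members mirror the 4-tuple returned by Django's legacy
    # Model._meta.get_field_by_name(), i.e. (field, model, direct, m2m), with
    # "direct" named "fk" here; the is_* properties combine those flags with
    # the field's class to classify the relationship.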
@property
def is_m2m(self):
return self.fk is False and self.m2m is True and isinstance(self.field, ForeignObjectRel)
@property
def is_reverse_m2m(self):
return self.fk is True and self.m2m is True and isinstance(self.field, ManyToManyField)
@property
def is_fk(self):
return self.fk is True and self.m2m is False and isinstance(self.field, ForeignKey)
@property
def is_reverse_fk(self):
return self.fk is False and self.m2m is False and isinstance(self.field, ForeignObjectRel)
def get_serializer_relationship_field(serializer, related_serializer):
if isinstance(serializer, type):
model = serializer().opts.model
else:
model = serializer.opts.model
if isinstance(related_serializer, type):
related_model = related_serializer().opts.model
else:
related_model = related_serializer.opts.model
for field_name in related_model._meta.get_all_field_names():
field_type = FieldType(*related_model._meta.get_field_by_name(field_name))
field = field_type.field
# Foreign key
if field_type.is_fk and field.rel.to is model:
return field.verbose_name
# Reverse foreign key
if field_type.is_reverse_fk and field.model is model:
return field.var_name
# M2m fields
if field_type.is_m2m and field.model is model:
return field.var_name
# Reverse m2m field
if field_type.is_reverse_m2m and field.rel.to is model:
return field.attname
def get_id_mappings(serializer):
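    # Builds a {field_name: pk or [pk, ...]} map for every published relation
    # on the serializer's instance, so a client can resolve references by id.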
if not serializer.instance:
return {}
data = {}
for field_name in serializer.opts.publish_fields:
if not hasattr(serializer, field_name):
continue
serializable_field = serializer._get_related_serializer(field_name)
if not hasattr(serializable_field, 'serialize'):
continue
field_type = getattr(serializer.opts.model, field_name)
is_fk = isinstance(field_type, ReverseSingleRelatedObjectDescriptor)
is_o2o = isinstance(field_type, SingleRelatedObjectDescriptor)
is_reverse_fk = isinstance(field_type, ForeignRelatedObjectsDescriptor)
is_m2m = isinstance(field_type, ManyRelatedObjectsDescriptor)
is_reverse_m2m = isinstance(field_type, ReverseManyRelatedObjectsDescriptor)
try:
val = getattr(serializer.instance, field_name)
except:
continue
if not val:
continue
if is_fk or is_o2o:
data['{}'.format(field_name)] = val.pk
continue
if is_reverse_fk or is_m2m or is_reverse_m2m:
data['{}'.format(field_name)] = list(val.all().values_list('pk', flat=True))
continue
return data
|
kenshay/ImageScripter | refs/heads/master | ProgramData/SystemFiles/Python/Lib/site-packages/win32/lib/sspicon.py | 24 | # Generated by h2py from c:\microsoft sdk\include\sspi.h
ISSP_LEVEL = 32
ISSP_MODE = 1
ISSP_LEVEL = 32
ISSP_MODE = 0
ISSP_LEVEL = 32
ISSP_MODE = 1
def SEC_SUCCESS(Status): return ((Status) >= 0)
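# For example: SEC_SUCCESS(SEC_E_OK) is true (SEC_E_OK is 0, defined below),
# while SEC_SUCCESS(SEC_E_INVALID_HANDLE) is false, since error HRESULTs are
# negative.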
SECPKG_FLAG_INTEGRITY = 1
SECPKG_FLAG_PRIVACY = 2
SECPKG_FLAG_TOKEN_ONLY = 4
SECPKG_FLAG_DATAGRAM = 8
SECPKG_FLAG_CONNECTION = 16
SECPKG_FLAG_MULTI_REQUIRED = 32
SECPKG_FLAG_CLIENT_ONLY = 64
SECPKG_FLAG_EXTENDED_ERROR = 128
SECPKG_FLAG_IMPERSONATION = 256
SECPKG_FLAG_ACCEPT_WIN32_NAME = 512
SECPKG_FLAG_STREAM = 1024
SECPKG_FLAG_NEGOTIABLE = 2048
SECPKG_FLAG_GSS_COMPATIBLE = 4096
SECPKG_FLAG_LOGON = 8192
SECPKG_FLAG_ASCII_BUFFERS = 16384
SECPKG_FLAG_FRAGMENT = 32768
SECPKG_FLAG_MUTUAL_AUTH = 65536
SECPKG_FLAG_DELEGATION = 131072
SECPKG_FLAG_READONLY_WITH_CHECKSUM = 262144
SECPKG_ID_NONE = 65535
SECBUFFER_VERSION = 0
SECBUFFER_EMPTY = 0
SECBUFFER_DATA = 1
SECBUFFER_TOKEN = 2
SECBUFFER_PKG_PARAMS = 3
SECBUFFER_MISSING = 4
SECBUFFER_EXTRA = 5
SECBUFFER_STREAM_TRAILER = 6
SECBUFFER_STREAM_HEADER = 7
SECBUFFER_NEGOTIATION_INFO = 8
SECBUFFER_PADDING = 9
SECBUFFER_STREAM = 10
SECBUFFER_MECHLIST = 11
SECBUFFER_MECHLIST_SIGNATURE = 12
SECBUFFER_TARGET = 13
SECBUFFER_CHANNEL_BINDINGS = 14
SECBUFFER_ATTRMASK = (-268435456)
SECBUFFER_READONLY = (-2147483648)
SECBUFFER_READONLY_WITH_CHECKSUM = 268435456
SECBUFFER_RESERVED = 1610612736
SECURITY_NATIVE_DREP = 16
SECURITY_NETWORK_DREP = 0
SECPKG_CRED_INBOUND = 1
SECPKG_CRED_OUTBOUND = 2
SECPKG_CRED_BOTH = 3
SECPKG_CRED_DEFAULT = 4
SECPKG_CRED_RESERVED = -268435456
ISC_REQ_DELEGATE = 1
ISC_REQ_MUTUAL_AUTH = 2
ISC_REQ_REPLAY_DETECT = 4
ISC_REQ_SEQUENCE_DETECT = 8
ISC_REQ_CONFIDENTIALITY = 16
ISC_REQ_USE_SESSION_KEY = 32
ISC_REQ_PROMPT_FOR_CREDS = 64
ISC_REQ_USE_SUPPLIED_CREDS = 128
ISC_REQ_ALLOCATE_MEMORY = 256
ISC_REQ_USE_DCE_STYLE = 512
ISC_REQ_DATAGRAM = 1024
ISC_REQ_CONNECTION = 2048
ISC_REQ_CALL_LEVEL = 4096
ISC_REQ_FRAGMENT_SUPPLIED = 8192
ISC_REQ_EXTENDED_ERROR = 16384
ISC_REQ_STREAM = 32768
ISC_REQ_INTEGRITY = 65536
ISC_REQ_IDENTIFY = 131072
ISC_REQ_NULL_SESSION = 262144
ISC_REQ_MANUAL_CRED_VALIDATION = 524288
ISC_REQ_RESERVED1 = 1048576
ISC_REQ_FRAGMENT_TO_FIT = 2097152
ISC_REQ_HTTP = 0x10000000
ISC_RET_DELEGATE = 1
ISC_RET_MUTUAL_AUTH = 2
ISC_RET_REPLAY_DETECT = 4
ISC_RET_SEQUENCE_DETECT = 8
ISC_RET_CONFIDENTIALITY = 16
ISC_RET_USE_SESSION_KEY = 32
ISC_RET_USED_COLLECTED_CREDS = 64
ISC_RET_USED_SUPPLIED_CREDS = 128
ISC_RET_ALLOCATED_MEMORY = 256
ISC_RET_USED_DCE_STYLE = 512
ISC_RET_DATAGRAM = 1024
ISC_RET_CONNECTION = 2048
ISC_RET_INTERMEDIATE_RETURN = 4096
ISC_RET_CALL_LEVEL = 8192
ISC_RET_EXTENDED_ERROR = 16384
ISC_RET_STREAM = 32768
ISC_RET_INTEGRITY = 65536
ISC_RET_IDENTIFY = 131072
ISC_RET_NULL_SESSION = 262144
ISC_RET_MANUAL_CRED_VALIDATION = 524288
ISC_RET_RESERVED1 = 1048576
ISC_RET_FRAGMENT_ONLY = 2097152
ASC_REQ_DELEGATE = 1
ASC_REQ_MUTUAL_AUTH = 2
ASC_REQ_REPLAY_DETECT = 4
ASC_REQ_SEQUENCE_DETECT = 8
ASC_REQ_CONFIDENTIALITY = 16
ASC_REQ_USE_SESSION_KEY = 32
ASC_REQ_ALLOCATE_MEMORY = 256
ASC_REQ_USE_DCE_STYLE = 512
ASC_REQ_DATAGRAM = 1024
ASC_REQ_CONNECTION = 2048
ASC_REQ_CALL_LEVEL = 4096
ASC_REQ_EXTENDED_ERROR = 32768
ASC_REQ_STREAM = 65536
ASC_REQ_INTEGRITY = 131072
ASC_REQ_LICENSING = 262144
ASC_REQ_IDENTIFY = 524288
ASC_REQ_ALLOW_NULL_SESSION = 1048576
ASC_REQ_ALLOW_NON_USER_LOGONS = 2097152
ASC_REQ_ALLOW_CONTEXT_REPLAY = 4194304
ASC_REQ_FRAGMENT_TO_FIT = 8388608
ASC_REQ_FRAGMENT_SUPPLIED = 8192
ASC_REQ_NO_TOKEN = 16777216
ASC_RET_DELEGATE = 1
ASC_RET_MUTUAL_AUTH = 2
ASC_RET_REPLAY_DETECT = 4
ASC_RET_SEQUENCE_DETECT = 8
ASC_RET_CONFIDENTIALITY = 16
ASC_RET_USE_SESSION_KEY = 32
ASC_RET_ALLOCATED_MEMORY = 256
ASC_RET_USED_DCE_STYLE = 512
ASC_RET_DATAGRAM = 1024
ASC_RET_CONNECTION = 2048
ASC_RET_CALL_LEVEL = 8192
ASC_RET_THIRD_LEG_FAILED = 16384
ASC_RET_EXTENDED_ERROR = 32768
ASC_RET_STREAM = 65536
ASC_RET_INTEGRITY = 131072
ASC_RET_LICENSING = 262144
ASC_RET_IDENTIFY = 524288
ASC_RET_NULL_SESSION = 1048576
ASC_RET_ALLOW_NON_USER_LOGONS = 2097152
ASC_RET_ALLOW_CONTEXT_REPLAY = 4194304
ASC_RET_FRAGMENT_ONLY = 8388608
SECPKG_CRED_ATTR_NAMES = 1
SECPKG_ATTR_SIZES = 0
SECPKG_ATTR_NAMES = 1
SECPKG_ATTR_LIFESPAN = 2
SECPKG_ATTR_DCE_INFO = 3
SECPKG_ATTR_STREAM_SIZES = 4
SECPKG_ATTR_KEY_INFO = 5
SECPKG_ATTR_AUTHORITY = 6
SECPKG_ATTR_PROTO_INFO = 7
SECPKG_ATTR_PASSWORD_EXPIRY = 8
SECPKG_ATTR_SESSION_KEY = 9
SECPKG_ATTR_PACKAGE_INFO = 10
SECPKG_ATTR_USER_FLAGS = 11
SECPKG_ATTR_NEGOTIATION_INFO = 12
SECPKG_ATTR_NATIVE_NAMES = 13
SECPKG_ATTR_FLAGS = 14
SECPKG_ATTR_USE_VALIDATED = 15
SECPKG_ATTR_CREDENTIAL_NAME = 16
SECPKG_ATTR_TARGET_INFORMATION = 17
SECPKG_ATTR_ACCESS_TOKEN = 18
SECPKG_ATTR_TARGET = 19
SECPKG_ATTR_AUTHENTICATION_ID = 20
## attributes from schannel.h
SECPKG_ATTR_REMOTE_CERT_CONTEXT = 83
SECPKG_ATTR_LOCAL_CERT_CONTEXT = 84
SECPKG_ATTR_ROOT_STORE = 85
SECPKG_ATTR_SUPPORTED_ALGS = 86
SECPKG_ATTR_CIPHER_STRENGTHS = 87
SECPKG_ATTR_SUPPORTED_PROTOCOLS = 88
SECPKG_ATTR_ISSUER_LIST_EX = 89
SECPKG_ATTR_CONNECTION_INFO = 90
SECPKG_ATTR_EAP_KEY_BLOCK = 91
SECPKG_ATTR_MAPPED_CRED_ATTR = 92
SECPKG_ATTR_SESSION_INFO = 93
SECPKG_ATTR_APP_DATA = 94
SECPKG_NEGOTIATION_COMPLETE = 0
SECPKG_NEGOTIATION_OPTIMISTIC = 1
SECPKG_NEGOTIATION_IN_PROGRESS = 2
SECPKG_NEGOTIATION_DIRECT = 3
SECPKG_NEGOTIATION_TRY_MULTICRED = 4
SECPKG_CONTEXT_EXPORT_RESET_NEW = 1
SECPKG_CONTEXT_EXPORT_DELETE_OLD = 2
SECQOP_WRAP_NO_ENCRYPT = (-2147483647)
SECURITY_ENTRYPOINT_ANSIW = "InitSecurityInterfaceW"
SECURITY_ENTRYPOINT_ANSIA = "InitSecurityInterfaceA"
SECURITY_ENTRYPOINT16 = "INITSECURITYINTERFACEA"
SECURITY_ENTRYPOINT_ANSI = SECURITY_ENTRYPOINT_ANSIW
SECURITY_ENTRYPOINT_ANSI = SECURITY_ENTRYPOINT_ANSIA
SECURITY_ENTRYPOINT = SECURITY_ENTRYPOINT16
SECURITY_ENTRYPOINT_ANSI = SECURITY_ENTRYPOINT16
SECURITY_SUPPORT_PROVIDER_INTERFACE_VERSION = 1
SECURITY_SUPPORT_PROVIDER_INTERFACE_VERSION_2 = 2
SASL_OPTION_SEND_SIZE = 1
SASL_OPTION_RECV_SIZE = 2
SASL_OPTION_AUTHZ_STRING = 3
SASL_OPTION_AUTHZ_PROCESSING = 4
SEC_WINNT_AUTH_IDENTITY_ANSI = 1
SEC_WINNT_AUTH_IDENTITY_UNICODE = 2
SEC_WINNT_AUTH_IDENTITY_VERSION = 512
SEC_WINNT_AUTH_IDENTITY_MARSHALLED = 4
SEC_WINNT_AUTH_IDENTITY_ONLY = 8
SECPKG_OPTIONS_TYPE_UNKNOWN = 0
SECPKG_OPTIONS_TYPE_LSA = 1
SECPKG_OPTIONS_TYPE_SSPI = 2
SECPKG_OPTIONS_PERMANENT = 1
SEC_E_INSUFFICIENT_MEMORY = -2146893056
SEC_E_INVALID_HANDLE = -2146893055
SEC_E_UNSUPPORTED_FUNCTION = -2146893054
SEC_E_TARGET_UNKNOWN = -2146893053
SEC_E_INTERNAL_ERROR = -2146893052
SEC_E_SECPKG_NOT_FOUND = -2146893051
SEC_E_NOT_OWNER = -2146893050
SEC_E_CANNOT_INSTALL = -2146893049
SEC_E_INVALID_TOKEN = -2146893048
SEC_E_CANNOT_PACK = -2146893047
SEC_E_QOP_NOT_SUPPORTED = -2146893046
SEC_E_NO_IMPERSONATION = -2146893045
SEC_E_LOGON_DENIED = -2146893044
SEC_E_UNKNOWN_CREDENTIALS = -2146893043
SEC_E_NO_CREDENTIALS = -2146893042
SEC_E_MESSAGE_ALTERED = -2146893041
SEC_E_OUT_OF_SEQUENCE = -2146893040
SEC_E_NO_AUTHENTICATING_AUTHORITY = -2146893039
SEC_I_CONTINUE_NEEDED = 590610
SEC_I_COMPLETE_NEEDED = 590611
SEC_I_COMPLETE_AND_CONTINUE = 590612
SEC_I_LOCAL_LOGON = 590613
SEC_E_BAD_PKGID = -2146893034
SEC_E_CONTEXT_EXPIRED = -2146893033
SEC_I_CONTEXT_EXPIRED = 590615
SEC_E_INCOMPLETE_MESSAGE = -2146893032
SEC_E_INCOMPLETE_CREDENTIALS = -2146893024
SEC_E_BUFFER_TOO_SMALL = -2146893023
SEC_I_INCOMPLETE_CREDENTIALS = 590624
SEC_I_RENEGOTIATE = 590625
SEC_E_WRONG_PRINCIPAL = -2146893022
SEC_I_NO_LSA_CONTEXT = 590627
SEC_E_TIME_SKEW = -2146893020
SEC_E_UNTRUSTED_ROOT = -2146893019
SEC_E_ILLEGAL_MESSAGE = -2146893018
SEC_E_CERT_UNKNOWN = -2146893017
SEC_E_CERT_EXPIRED = -2146893016
SEC_E_ENCRYPT_FAILURE = -2146893015
SEC_E_DECRYPT_FAILURE = -2146893008
SEC_E_ALGORITHM_MISMATCH = -2146893007
SEC_E_SECURITY_QOS_FAILED = -2146893006
SEC_E_UNFINISHED_CONTEXT_DELETED = -2146893005
SEC_E_NO_TGT_REPLY = -2146893004
SEC_E_NO_IP_ADDRESSES = -2146893003
SEC_E_WRONG_CREDENTIAL_HANDLE = -2146893002
SEC_E_CRYPTO_SYSTEM_INVALID = -2146893001
SEC_E_MAX_REFERRALS_EXCEEDED = -2146893000
SEC_E_MUST_BE_KDC = -2146892999
SEC_E_STRONG_CRYPTO_NOT_SUPPORTED = -2146892998
SEC_E_TOO_MANY_PRINCIPALS = -2146892997
SEC_E_NO_PA_DATA = -2146892996
SEC_E_PKINIT_NAME_MISMATCH = -2146892995
SEC_E_SMARTCARD_LOGON_REQUIRED = -2146892994
SEC_E_SHUTDOWN_IN_PROGRESS = -2146892993
SEC_E_KDC_INVALID_REQUEST = -2146892992
SEC_E_KDC_UNABLE_TO_REFER = -2146892991
SEC_E_KDC_UNKNOWN_ETYPE = -2146892990
SEC_E_UNSUPPORTED_PREAUTH = -2146892989
SEC_E_DELEGATION_REQUIRED = -2146892987
SEC_E_BAD_BINDINGS = -2146892986
SEC_E_MULTIPLE_ACCOUNTS = -2146892985
SEC_E_NO_KERB_KEY = -2146892984
ERROR_IPSEC_QM_POLICY_EXISTS = 13000
ERROR_IPSEC_QM_POLICY_NOT_FOUND = 13001
ERROR_IPSEC_QM_POLICY_IN_USE = 13002
ERROR_IPSEC_MM_POLICY_EXISTS = 13003
ERROR_IPSEC_MM_POLICY_NOT_FOUND = 13004
ERROR_IPSEC_MM_POLICY_IN_USE = 13005
ERROR_IPSEC_MM_FILTER_EXISTS = 13006
ERROR_IPSEC_MM_FILTER_NOT_FOUND = 13007
ERROR_IPSEC_TRANSPORT_FILTER_EXISTS = 13008
ERROR_IPSEC_TRANSPORT_FILTER_NOT_FOUND = 13009
ERROR_IPSEC_MM_AUTH_EXISTS = 13010
ERROR_IPSEC_MM_AUTH_NOT_FOUND = 13011
ERROR_IPSEC_MM_AUTH_IN_USE = 13012
ERROR_IPSEC_DEFAULT_MM_POLICY_NOT_FOUND = 13013
ERROR_IPSEC_DEFAULT_MM_AUTH_NOT_FOUND = 13014
ERROR_IPSEC_DEFAULT_QM_POLICY_NOT_FOUND = 13015
ERROR_IPSEC_TUNNEL_FILTER_EXISTS = 13016
ERROR_IPSEC_TUNNEL_FILTER_NOT_FOUND = 13017
ERROR_IPSEC_MM_FILTER_PENDING_DELETION = 13018
ERROR_IPSEC_TRANSPORT_FILTER_PENDING_DELETION = 13019
ERROR_IPSEC_TUNNEL_FILTER_PENDING_DELETION = 13020
ERROR_IPSEC_MM_POLICY_PENDING_DELETION = 13021
ERROR_IPSEC_MM_AUTH_PENDING_DELETION = 13022
ERROR_IPSEC_QM_POLICY_PENDING_DELETION = 13023
WARNING_IPSEC_MM_POLICY_PRUNED = 13024
WARNING_IPSEC_QM_POLICY_PRUNED = 13025
ERROR_IPSEC_IKE_NEG_STATUS_BEGIN = 13800
ERROR_IPSEC_IKE_AUTH_FAIL = 13801
ERROR_IPSEC_IKE_ATTRIB_FAIL = 13802
ERROR_IPSEC_IKE_NEGOTIATION_PENDING = 13803
ERROR_IPSEC_IKE_GENERAL_PROCESSING_ERROR = 13804
ERROR_IPSEC_IKE_TIMED_OUT = 13805
ERROR_IPSEC_IKE_NO_CERT = 13806
ERROR_IPSEC_IKE_SA_DELETED = 13807
ERROR_IPSEC_IKE_SA_REAPED = 13808
ERROR_IPSEC_IKE_MM_ACQUIRE_DROP = 13809
ERROR_IPSEC_IKE_QM_ACQUIRE_DROP = 13810
ERROR_IPSEC_IKE_QUEUE_DROP_MM = 13811
ERROR_IPSEC_IKE_QUEUE_DROP_NO_MM = 13812
ERROR_IPSEC_IKE_DROP_NO_RESPONSE = 13813
ERROR_IPSEC_IKE_MM_DELAY_DROP = 13814
ERROR_IPSEC_IKE_QM_DELAY_DROP = 13815
ERROR_IPSEC_IKE_ERROR = 13816
ERROR_IPSEC_IKE_CRL_FAILED = 13817
ERROR_IPSEC_IKE_INVALID_KEY_USAGE = 13818
ERROR_IPSEC_IKE_INVALID_CERT_TYPE = 13819
ERROR_IPSEC_IKE_NO_PRIVATE_KEY = 13820
ERROR_IPSEC_IKE_DH_FAIL = 13822
ERROR_IPSEC_IKE_INVALID_HEADER = 13824
ERROR_IPSEC_IKE_NO_POLICY = 13825
ERROR_IPSEC_IKE_INVALID_SIGNATURE = 13826
ERROR_IPSEC_IKE_KERBEROS_ERROR = 13827
ERROR_IPSEC_IKE_NO_PUBLIC_KEY = 13828
ERROR_IPSEC_IKE_PROCESS_ERR = 13829
ERROR_IPSEC_IKE_PROCESS_ERR_SA = 13830
ERROR_IPSEC_IKE_PROCESS_ERR_PROP = 13831
ERROR_IPSEC_IKE_PROCESS_ERR_TRANS = 13832
ERROR_IPSEC_IKE_PROCESS_ERR_KE = 13833
ERROR_IPSEC_IKE_PROCESS_ERR_ID = 13834
ERROR_IPSEC_IKE_PROCESS_ERR_CERT = 13835
ERROR_IPSEC_IKE_PROCESS_ERR_CERT_REQ = 13836
ERROR_IPSEC_IKE_PROCESS_ERR_HASH = 13837
ERROR_IPSEC_IKE_PROCESS_ERR_SIG = 13838
ERROR_IPSEC_IKE_PROCESS_ERR_NONCE = 13839
ERROR_IPSEC_IKE_PROCESS_ERR_NOTIFY = 13840
ERROR_IPSEC_IKE_PROCESS_ERR_DELETE = 13841
ERROR_IPSEC_IKE_PROCESS_ERR_VENDOR = 13842
ERROR_IPSEC_IKE_INVALID_PAYLOAD = 13843
ERROR_IPSEC_IKE_LOAD_SOFT_SA = 13844
ERROR_IPSEC_IKE_SOFT_SA_TORN_DOWN = 13845
ERROR_IPSEC_IKE_INVALID_COOKIE = 13846
ERROR_IPSEC_IKE_NO_PEER_CERT = 13847
ERROR_IPSEC_IKE_PEER_CRL_FAILED = 13848
ERROR_IPSEC_IKE_POLICY_CHANGE = 13849
ERROR_IPSEC_IKE_NO_MM_POLICY = 13850
ERROR_IPSEC_IKE_NOTCBPRIV = 13851
ERROR_IPSEC_IKE_SECLOADFAIL = 13852
ERROR_IPSEC_IKE_FAILSSPINIT = 13853
ERROR_IPSEC_IKE_FAILQUERYSSP = 13854
ERROR_IPSEC_IKE_SRVACQFAIL = 13855
ERROR_IPSEC_IKE_SRVQUERYCRED = 13856
ERROR_IPSEC_IKE_GETSPIFAIL = 13857
ERROR_IPSEC_IKE_INVALID_FILTER = 13858
ERROR_IPSEC_IKE_OUT_OF_MEMORY = 13859
ERROR_IPSEC_IKE_ADD_UPDATE_KEY_FAILED = 13860
ERROR_IPSEC_IKE_INVALID_POLICY = 13861
ERROR_IPSEC_IKE_UNKNOWN_DOI = 13862
ERROR_IPSEC_IKE_INVALID_SITUATION = 13863
ERROR_IPSEC_IKE_DH_FAILURE = 13864
ERROR_IPSEC_IKE_INVALID_GROUP = 13865
ERROR_IPSEC_IKE_ENCRYPT = 13866
ERROR_IPSEC_IKE_DECRYPT = 13867
ERROR_IPSEC_IKE_POLICY_MATCH = 13868
ERROR_IPSEC_IKE_UNSUPPORTED_ID = 13869
ERROR_IPSEC_IKE_INVALID_HASH = 13870
ERROR_IPSEC_IKE_INVALID_HASH_ALG = 13871
ERROR_IPSEC_IKE_INVALID_HASH_SIZE = 13872
ERROR_IPSEC_IKE_INVALID_ENCRYPT_ALG = 13873
ERROR_IPSEC_IKE_INVALID_AUTH_ALG = 13874
ERROR_IPSEC_IKE_INVALID_SIG = 13875
ERROR_IPSEC_IKE_LOAD_FAILED = 13876
ERROR_IPSEC_IKE_RPC_DELETE = 13877
ERROR_IPSEC_IKE_BENIGN_REINIT = 13878
ERROR_IPSEC_IKE_INVALID_RESPONDER_LIFETIME_NOTIFY = 13879
ERROR_IPSEC_IKE_INVALID_CERT_KEYLEN = 13881
ERROR_IPSEC_IKE_MM_LIMIT = 13882
ERROR_IPSEC_IKE_NEGOTIATION_DISABLED = 13883
ERROR_IPSEC_IKE_NEG_STATUS_END = 13884
CRYPT_E_MSG_ERROR = ((-2146889727))
CRYPT_E_UNKNOWN_ALGO = ((-2146889726))
CRYPT_E_OID_FORMAT = ((-2146889725))
CRYPT_E_INVALID_MSG_TYPE = ((-2146889724))
CRYPT_E_UNEXPECTED_ENCODING = ((-2146889723))
CRYPT_E_AUTH_ATTR_MISSING = ((-2146889722))
CRYPT_E_HASH_VALUE = ((-2146889721))
CRYPT_E_INVALID_INDEX = ((-2146889720))
CRYPT_E_ALREADY_DECRYPTED = ((-2146889719))
CRYPT_E_NOT_DECRYPTED = ((-2146889718))
CRYPT_E_RECIPIENT_NOT_FOUND = ((-2146889717))
CRYPT_E_CONTROL_TYPE = ((-2146889716))
CRYPT_E_ISSUER_SERIALNUMBER = ((-2146889715))
CRYPT_E_SIGNER_NOT_FOUND = ((-2146889714))
CRYPT_E_ATTRIBUTES_MISSING = ((-2146889713))
CRYPT_E_STREAM_MSG_NOT_READY = ((-2146889712))
CRYPT_E_STREAM_INSUFFICIENT_DATA = ((-2146889711))
CRYPT_I_NEW_PROTECTION_REQUIRED = (593938)
CRYPT_E_BAD_LEN = ((-2146885631))
CRYPT_E_BAD_ENCODE = ((-2146885630))
CRYPT_E_FILE_ERROR = ((-2146885629))
CRYPT_E_NOT_FOUND = ((-2146885628))
CRYPT_E_EXISTS = ((-2146885627))
CRYPT_E_NO_PROVIDER = ((-2146885626))
CRYPT_E_SELF_SIGNED = ((-2146885625))
CRYPT_E_DELETED_PREV = ((-2146885624))
CRYPT_E_NO_MATCH = ((-2146885623))
CRYPT_E_UNEXPECTED_MSG_TYPE = ((-2146885622))
CRYPT_E_NO_KEY_PROPERTY = ((-2146885621))
CRYPT_E_NO_DECRYPT_CERT = ((-2146885620))
CRYPT_E_BAD_MSG = ((-2146885619))
CRYPT_E_NO_SIGNER = ((-2146885618))
CRYPT_E_PENDING_CLOSE = ((-2146885617))
CRYPT_E_REVOKED = ((-2146885616))
CRYPT_E_NO_REVOCATION_DLL = ((-2146885615))
CRYPT_E_NO_REVOCATION_CHECK = ((-2146885614))
CRYPT_E_REVOCATION_OFFLINE = ((-2146885613))
CRYPT_E_NOT_IN_REVOCATION_DATABASE = ((-2146885612))
CRYPT_E_INVALID_NUMERIC_STRING = ((-2146885600))
CRYPT_E_INVALID_PRINTABLE_STRING = ((-2146885599))
CRYPT_E_INVALID_IA5_STRING = ((-2146885598))
CRYPT_E_INVALID_X500_STRING = ((-2146885597))
CRYPT_E_NOT_CHAR_STRING = ((-2146885596))
CRYPT_E_FILERESIZED = ((-2146885595))
CRYPT_E_SECURITY_SETTINGS = ((-2146885594))
CRYPT_E_NO_VERIFY_USAGE_DLL = ((-2146885593))
CRYPT_E_NO_VERIFY_USAGE_CHECK = ((-2146885592))
CRYPT_E_VERIFY_USAGE_OFFLINE = ((-2146885591))
CRYPT_E_NOT_IN_CTL = ((-2146885590))
CRYPT_E_NO_TRUSTED_SIGNER = ((-2146885589))
CRYPT_E_MISSING_PUBKEY_PARA = ((-2146885588))
CRYPT_E_OSS_ERROR = ((-2146881536))
## Kerberos message types for LsaCallAuthenticationPackage (from ntsecapi.h)
KerbDebugRequestMessage = 0
KerbQueryTicketCacheMessage = 1
KerbChangeMachinePasswordMessage = 2
KerbVerifyPacMessage = 3
KerbRetrieveTicketMessage = 4
KerbUpdateAddressesMessage = 5
KerbPurgeTicketCacheMessage = 6
KerbChangePasswordMessage = 7
KerbRetrieveEncodedTicketMessage = 8
KerbDecryptDataMessage = 9
KerbAddBindingCacheEntryMessage = 10
KerbSetPasswordMessage = 11
KerbSetPasswordExMessage = 12
KerbVerifyCredentialsMessage = 13
KerbQueryTicketCacheExMessage = 14
KerbPurgeTicketCacheExMessage = 15
KerbRefreshSmartcardCredentialsMessage = 16
KerbAddExtraCredentialsMessage = 17
KerbQuerySupplementalCredentialsMessage = 18
## messages used with msv1_0 from ntsecapi.h
MsV1_0Lm20ChallengeRequest = 0
MsV1_0Lm20GetChallengeResponse = 1
MsV1_0EnumerateUsers = 2
MsV1_0GetUserInfo = 3
MsV1_0ReLogonUsers = 4
MsV1_0ChangePassword = 5
MsV1_0ChangeCachedPassword = 6
MsV1_0GenericPassthrough = 7
MsV1_0CacheLogon = 8
MsV1_0SubAuth = 9
MsV1_0DeriveCredential = 10
MsV1_0CacheLookup = 11
MsV1_0SetProcessOption = 12
SEC_E_OK = 0
|
ecometrica/django-wapiti | refs/heads/master | wapiti/parsers.py | 1 | # Copyright (c) Ecometrica. All rights reserved.
# Distributed under the BSD license. See LICENSE for details.
import datetime as dt
from decimal import Decimal
from inspect import getargspec
import json
import re
import warnings
from django.db import models
from django.db.models.fields.files import FieldFile
from django.db.models.query import QuerySet
from django.http import HttpResponse
from wapiti import helpers
from wapiti.modelapi import ModelApi
# ISO 8601
DATE_RE = re.compile('((?<!")[0-9]{4}-[0-1]?[0-9]-[0-3]?[0-9](?!"))')
DATE_FORMAT = '%Y-%m-%d'
class ModelNotRegisteredError(Exception):
pass
HTML_STYLE = (
"""<style type="text/css">
body {
font-family: Helvetica;
font-size: 9px;
}
table {
width: 100%;
}
table tr td {
vertical-align: top;
padding: 4px;
margin: 0px;
font-weight: bold;
}
table tr td table {
width: 900px;
border: none;
border-bottom: 1px solid #666;
}
table tr td table tr td table {
width: 700px;
border: none;
border-top: 1px solid #aaa;
}
table tr td table tr td {
font-weight: normal;
}
table tr td table tr td table tr td {
border-bottom: 1px solid #aaa;
font-weight: normal;
}
table tr td table tr td table tr td table {
width: 500px;
border: none;
}
table tr td table tr td table tr td table tr td table {
width: 300px;
border: none;
}
table tr td table tr td table tr td table tr td table tr td {
border: none;
font-weight: normal;
padding: 0px;
font-size: 0.8em;
}
</style>
"""
)
class EcoJSONDecoder(json.JSONDecoder):
def decode(self, s):
try:
return super(EcoJSONDecoder, self).decode(s)
except ValueError:
return None
class Decoder(object):
def __init__(self, format):
self.format = format
self.decode = getattr(self, format)
def json(self, value):
        # FIXME: fragile - regex-quote bare ISO dates so json can parse them
value = DATE_RE.subn(r'"\1"', value)[0]
_decoded = json.loads(value, 'UTF-8', EcoJSONDecoder)
_parsed = self.convert(_decoded)
return _parsed
def html(self, value):
return self.json(value)
def convert(self, value):
# recursively decode objects and dates
if isinstance(value, list):
value = [self.convert(i) for i in value]
elif isinstance(value, dict):
for k, v in value.iteritems():
value[k] = self.convert(v)
if 'type' in value:
try:
value = self.dict_to_object(value)
except ModelNotRegisteredError:
# If we can't convert this dict to a model object,
# keep it as a dict.
pass
elif isinstance(value, (str, unicode)) and DATE_RE.match(value):
value = dt.datetime.strptime(value, DATE_FORMAT).date()
return value
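    # On the wire a model reference is a dict like the following (a sketch;
    # the type name is hypothetical):
    #   {"type": "some_registered_type", "id": 42, "str": "object 42"}
    # dict_to_object() strips "type"/"str" and resolves the remaining keys
    # with objects.get().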
def dict_to_object(self, value):
try:
m = helpers._registered_types[value['type']].api
except KeyError:
raise ModelNotRegisteredError()
value.pop('type')
if 'str' in value:
value.pop('str')
return m.objects.get(**value)
class Encoder(object):
def __init__(self, format, jsonp=None, serialize_all_fields=False,
file_handler=None, max_depth=1):
"""
Constructor, sets up Encoder behavior
format: format to output (e.g. json)
jsonp: in case the output needs to be wrapped in a jsonp method, give
the method name here
serialize_all_fields: by default, when encountering an object, the
serializer will output a dict with the type, id,
repr() and only the fields in the ModelApi's
repr_object_fields list. Set this to true to
override this and serialize all the fields
file_handler: If set to a callable, the encoder will call this for
outside code to do something with the file for filefields
and its descendants. The filefield will be passed as an argument.
max_depth: when serialiazing fields pointing to other objects, the
serializer will recurse to at most max_depth depth
"""
self.format = format
self.encode = getattr(self, format)
self.jsonp = jsonp
self.serialize_all_fields = serialize_all_fields
self.max_depth = max_depth
self.file_handler = file_handler
try:
filehandler_args = getargspec(self.file_handler)[0]
self._filehandler_backwards_compat = len(filehandler_args) > 1
except TypeError:
# Looks like self.file_handler isn't a function
self._filehandler_backwards_compat = False
def to_json(self, value):
return json.dumps(self.convert(value))
def json(self, value):
resp = self.to_json(value)
if self.jsonp:
jsonp = re.sub(r'[^a-zA-Z0-9_]', '', self.jsonp)
resp = u'%s(%s)'%(jsonp, resp)
return HttpResponse(resp, content_type='application/json; charset=utf-8')
def html(self, value):
converted = self.convert(value)
return HttpResponse(
'<html><head>%(style)s</head><body>%(body)s</body></html>' % {
'style': HTML_STYLE,
'body': self.to_html(converted)
},
content_type='text/html; charset=utf-8'
)
def to_html(self, value):
if isinstance(value, (list, tuple, set)):
if not len(value):
html = '<em>none</em>'
html = ('<table>'
+ '\n'.join(['<tr><td>%s</td></tr>' % self.to_html(v)
for v in value])
+ '</table>')
elif isinstance(value, dict):
html = ('<table>'
+ '\n'.join(['<tr><td>%s</td><td>%s</td></tr>'
% (self.to_html(k), self.to_html(v))
for k, v in value.iteritems()])
+ '</table>')
elif value is None:
html = u'<em>None</em>'
else:
html = (u'%s'%value).replace('\n', '<br/>')
return html
def convert(self, value, depth=1):
# recursively encode objects and dates
# the depth is used to monitor the recursion depth in the case
# of fields in a model which contain other model objects, such as
# a FK. But because after the initial conversion, the encoder
# will not encode all the object's fields even if serialize_all_fields
# was True on the Encoder, this will only kick in if the ModelApi for
# that model had a FK/M2M/O2M field in its repr_object_fields
if isinstance(value, (list, QuerySet, tuple, set)):
value = [self.convert(i, depth) for i in value]
elif isinstance(value, models.Model):
if (depth == 1 and self.serialize_all_fields
and depth <= self.max_depth):
value = self.convert(self.object_to_dict(value, all_fields=True),
depth + 1)
elif depth <= self.max_depth:
value = self.convert(self.object_to_dict(value, all_fields=False),
depth + 1)
else:
value = self.object_to_dict(value, all_fields=False)
elif isinstance(value, dict):
for k, v in value.iteritems():
value[k] = self.convert(v, depth)
elif isinstance(value, dt.date):
value = value.isoformat()
elif isinstance(value, Decimal):
value = float(value)
elif isinstance(value, FieldFile):
try:
if callable(self.file_handler):
if self._filehandler_backwards_compat:
warnings.warn(
"3-arg form of file_handle is deprecated; "
"only the first argument is necessary",
DeprecationWarning)
self.file_handler(value, value.name, value.path)
else:
self.file_handler(value)
value = {'file': value.name}
except ValueError:
value = {'file': None}
elif hasattr(value, 'to_wapiti'):
value = self.convert(value.to_wapiti(), depth)
return value
def object_to_dict(self, value, all_fields=False):
for k, v in helpers._registered_types.iteritems():
if isinstance(value, v.api.model):
type_name = k
api = v.api
break
else:
type_name = value.__class__.__name__
api = ModelApi()
try:
api_str = value.__api_unicode__()
except AttributeError:
api_str = unicode(value)
obj_dict = {'type': type_name, 'id': value.pk, 'str': api_str}
if all_fields:
fields = [f.name for f in value._meta.fields if f.name not in api.invisible_fields]
else:
fields = api.object_repr_fields
for f in fields:
# object_repr_fields is referring to fields in a different model, which is fine
# but leaving the dot in the field name sent is not nice on the other end,
# particularly in python-land where you'll need to getattr everything
key = f.replace('.', '__')
obj_dict[key] = eval('value.' + f)
return obj_dict
|
Rawk/xbmc | refs/heads/master | tools/EventClients/Clients/PS3 BD Remote/ps3_remote.py | 138 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2013 Team XBMC
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# This is a quick port of brandonj's PS3 remote script to use the event server
# for sending input events.
#
# The original script and documentation regarding the remote can be found at:
# http://forum.kodi.tv/showthread.php?tid=28765
#
#
# TODO:
# 1. Send keepalive ping at least once every 60 seconds to prevent timeouts
# 2. Permanent pairing
# 3. Detect if Kodi has been restarted (non trivial until broadcasting is
# implemented, until then maybe the HELO packet could be used instead of
# PING as keepalive
#
import sys
try:
# try loading modules from source directory
sys.path.append("../../lib/python")
from xbmcclient import *
from ps3.keymaps import keymap_remote as g_keymap # look here to change the keymapping
from bt.bt import *
ICON_PATH = "../../icons/"
except:
# fallback to system wide modules
from kodi.xbmcclient import *
from kodi.ps3.keymaps import keymap_remote as g_keymap # look here to change the keymapping
from kodi.bt.bt import *
from kodi.defs import *
import os
import time
xbmc = None
bticon = ICON_PATH + "/bluetooth.png"
def get_remote_address(remote, target_name = "BD Remote Control"):
global xbmc
target_connected = False
target_address = None
while target_connected is False:
xbmc.send_notification("Action Required!",
"Hold Start+Enter on your remote.",
bticon)
print "Searching for %s" % target_name
print "(Hold Start + Enter on remote to make it discoverable)"
time.sleep(2)
if not target_address:
try:
nearby_devices = bt_discover_devices()
except Exception, e:
print "Error performing bluetooth discovery"
print str(e)
xbmc.send_notification("Error", "Unable to find devices.", bticon)
time.sleep(5)
continue
for bdaddr in nearby_devices:
bname = bt_lookup_name( bdaddr )
addr = bt_lookup_addr ( bdaddr )
print "%s (%s) in range" % (bname,addr)
if target_name == bname:
target_address = addr
break
if target_address is not None:
print "Found %s with address %s" % (target_name, target_address)
xbmc.send_notification("Found Device",
"Pairing %s, please wait." % target_name,
bticon)
print "Attempting to pair with remote"
try:
remote.connect((target_address,19))
target_connected = True
print "Remote Paired.\a"
xbmc.send_notification("Pairing Successfull",
"Your remote was successfully "\
"paired and is ready to be used.",
bticon)
except:
del remote
remote = bt_create_socket()
target_address = None
xbmc.send_notification("Pairing Failed",
"An error occurred while attempting to "\
"pair.", bticon)
print "ERROR - Could Not Connect. Trying again..."
time.sleep(2)
else:
xbmc.send_notification("Error", "No remotes were found.", bticon)
print "Could not find BD Remote Control. Trying again..."
time.sleep(2)
return (remote,target_address)
def usage():
print """
PS3 Blu-Ray Remote Control Client for XBMC v0.1
Usage: ps3_remote.py <address> [port]
address => address of system that XBMC is running on
("localhost" if it is this machine)
port => port to send packets to
(default 9777)
"""
def process_keys(remote, xbmc):
"""
Return codes:
0 - key was processed normally
2 - socket read timeout
3 - PS and then Skip Plus was pressed (sequentially)
4 - PS and then Skip Minus was pressed (sequentially)
FIXME: move to enums
"""
done = 0
try:
xbmc.previous_key
except:
xbmc.previous_key = ""
xbmc.connect()
datalen = 0
try:
data = remote.recv(1024)
datalen = len(data)
except Exception, e:
if str(e)=="timed out":
return 2
time.sleep(2)
        # some other read exception occurred, so raise it
raise e
if datalen == 13:
keycode = data.encode("hex")[10:12]
if keycode == "ff":
xbmc.release_button()
return done
try:
# if the user presses the PS button followed by skip + or skip -
# return different codes.
if xbmc.previous_key == "43":
xbmc.previous_key = keycode
if keycode == "31": # skip +
return 3
elif keycode == "30": # skip -
return 4
# save previous key press
xbmc.previous_key = keycode
if g_keymap[keycode]:
xbmc.send_remote_button(g_keymap[keycode])
except Exception, e:
print "Unknown data: %s" % str(e)
return done
def main():
global xbmc, bticon
host = "127.0.0.1"
port = 9777
if len(sys.argv)>1:
try:
host = sys.argv[1]
port = sys.argv[2]
except:
pass
else:
return usage()
loop_forever = True
xbmc = XBMCClient("PS3 Bluetooth Remote",
icon_file=bticon)
while loop_forever is True:
target_connected = False
remote = bt_create_socket()
xbmc.connect(host, port)
(remote,target_address) = get_remote_address(remote)
while True:
if process_keys(remote, xbmc):
break
print "Disconnected."
try:
remote.close()
except:
print "Cannot close."
if __name__=="__main__":
main()
|
jabesq/home-assistant | refs/heads/dev | homeassistant/components/websocket_api/permissions.py | 2 | """Permission constants for the websocket API.
Separate file to avoid circular imports.
"""
from homeassistant.const import (
EVENT_COMPONENT_LOADED,
EVENT_SERVICE_REGISTERED,
EVENT_SERVICE_REMOVED,
EVENT_STATE_CHANGED,
EVENT_THEMES_UPDATED)
from homeassistant.components.persistent_notification import (
EVENT_PERSISTENT_NOTIFICATIONS_UPDATED)
from homeassistant.components.lovelace import EVENT_LOVELACE_UPDATED
from homeassistant.helpers.area_registry import EVENT_AREA_REGISTRY_UPDATED
from homeassistant.helpers.device_registry import EVENT_DEVICE_REGISTRY_UPDATED
from homeassistant.helpers.entity_registry import EVENT_ENTITY_REGISTRY_UPDATED
from homeassistant.components.frontend import EVENT_PANELS_UPDATED
# These are events that do not contain any sensitive data
# Except for state_changed, which is handled accordingly.
SUBSCRIBE_WHITELIST = {
EVENT_COMPONENT_LOADED,
EVENT_PANELS_UPDATED,
EVENT_PERSISTENT_NOTIFICATIONS_UPDATED,
EVENT_SERVICE_REGISTERED,
EVENT_SERVICE_REMOVED,
EVENT_STATE_CHANGED,
EVENT_THEMES_UPDATED,
EVENT_AREA_REGISTRY_UPDATED,
EVENT_DEVICE_REGISTRY_UPDATED,
EVENT_ENTITY_REGISTRY_UPDATED,
EVENT_LOVELACE_UPDATED,
}
|
aiyyoi/DevFest-MaxBond | refs/heads/master | MaxBond/env/lib/python2.7/site-packages/pymysql/tests/thirdparty/test_MySQLdb/dbapi20.py | 29 | #!/usr/bin/env python
''' Python DB API 2.0 driver compliance unit test suite.
This software is Public Domain and may be used without restrictions.
"Now we have booze and barflies entering the discussion, plus rumours of
DBAs on drugs... and I won't tell you what flashes through my mind each
time I read the subject line with 'Anal Compliance' in it. All around
this is turning out to be a thoroughly unwholesome unit test."
-- Ian Bicking
'''
__rcs_id__ = '$Id$'
__version__ = '$Revision$'[11:-2]
__author__ = 'Stuart Bishop <zen@shangri-la.dropbear.id.au>'
try:
import unittest2 as unittest
except ImportError:
import unittest
import time
# $Log$
# Revision 1.1.2.1 2006/02/25 03:44:32 adustman
# Generic DB-API unit test module
#
# Revision 1.10 2003/10/09 03:14:14 zenzen
# Add test for DB API 2.0 optional extension, where database exceptions
# are exposed as attributes on the Connection object.
#
# Revision 1.9 2003/08/13 01:16:36 zenzen
# Minor tweak from Stefan Fleiter
#
# Revision 1.8 2003/04/10 00:13:25 zenzen
# Changes, as per suggestions by M.-A. Lemburg
# - Add a table prefix, to ensure namespace collisions can always be avoided
#
# Revision 1.7 2003/02/26 23:33:37 zenzen
# Break out DDL into helper functions, as per request by David Rushby
#
# Revision 1.6 2003/02/21 03:04:33 zenzen
# Stuff from Henrik Ekelund:
# added test_None
# added test_nextset & hooks
#
# Revision 1.5 2003/02/17 22:08:43 zenzen
# Implement suggestions and code from Henrik Eklund - test that cursor.arraysize
# defaults to 1 & generic cursor.callproc test added
#
# Revision 1.4 2003/02/15 00:16:33 zenzen
# Changes, as per suggestions and bug reports by M.-A. Lemburg,
# Matthew T. Kromer, Federico Di Gregorio and Daniel Dittmar
# - Class renamed
# - Now a subclass of TestCase, to avoid requiring the driver stub
# to use multiple inheritance
# - Reversed the polarity of buggy test in test_description
# - Test exception heirarchy correctly
# - self.populate is now self._populate(), so if a driver stub
# overrides self.ddl1 this change propogates
# - VARCHAR columns now have a width, which will hopefully make the
# DDL even more portible (this will be reversed if it causes more problems)
# - cursor.rowcount being checked after various execute and fetchXXX methods
# - Check for fetchall and fetchmany returning empty lists after results
# are exhausted (already checking for empty lists if select retrieved
# nothing
# - Fix bugs in test_setoutputsize_basic and test_setinputsizes
#
class DatabaseAPI20Test(unittest.TestCase):
    ''' Test a database driver for DB API 2.0 compatibility.
        This implementation tests Gadfly, but the TestCase
        is structured so that other drivers can subclass this
        test case to ensure compliance with the DB-API. It is
        expected that this TestCase may be expanded in the future
        if ambiguities or edge conditions are discovered.
        The 'Optional Extensions' are not yet being tested.
        Drivers should subclass this test, overriding setUp, tearDown,
self.driver, connect_args and connect_kw_args. Class specification
should be as follows:
import dbapi20
class mytest(dbapi20.DatabaseAPI20Test):
[...]
Don't 'import DatabaseAPI20Test from dbapi20', or you will
confuse the unit tester - just 'import dbapi20'.
'''
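    # A concrete subclass might look like this (a sketch; the connect
    # arguments are driver-specific and hypothetical):
    #
    #   import pymysql, dbapi20
    #   class test_pymysql(dbapi20.DatabaseAPI20Test):
    #       driver = pymysql
    #       connect_kw_args = {'host': 'localhost', 'user': 'test',
    #                          'passwd': '', 'db': 'dbapi20_test'}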
# The self.driver module. This should be the module where the 'connect'
# method is to be found
driver = None
connect_args = () # List of arguments to pass to connect
connect_kw_args = {} # Keyword arguments for connect
table_prefix = 'dbapi20test_' # If you need to specify a prefix for tables
ddl1 = 'create table %sbooze (name varchar(20))' % table_prefix
ddl2 = 'create table %sbarflys (name varchar(20))' % table_prefix
xddl1 = 'drop table %sbooze' % table_prefix
xddl2 = 'drop table %sbarflys' % table_prefix
lowerfunc = 'lower' # Name of stored procedure to convert string->lowercase
# Some drivers may need to override these helpers, for example adding
# a 'commit' after the execute.
def executeDDL1(self,cursor):
cursor.execute(self.ddl1)
def executeDDL2(self,cursor):
cursor.execute(self.ddl2)
def setUp(self):
        ''' Drivers should override this method to perform required setup
if any is necessary, such as creating the database.
'''
pass
def tearDown(self):
        ''' Drivers should override this method to perform required cleanup
if any is necessary, such as deleting the test database.
The default drops the tables that may be created.
'''
con = self._connect()
try:
cur = con.cursor()
for ddl in (self.xddl1,self.xddl2):
try:
cur.execute(ddl)
con.commit()
except self.driver.Error:
# Assume table didn't exist. Other tests will check if
# execute is busted.
pass
finally:
con.close()
def _connect(self):
try:
return self.driver.connect(
*self.connect_args,**self.connect_kw_args
)
except AttributeError:
self.fail("No connect method found in self.driver module")
def test_connect(self):
con = self._connect()
con.close()
def test_apilevel(self):
try:
# Must exist
apilevel = self.driver.apilevel
# Must equal 2.0
self.assertEqual(apilevel,'2.0')
except AttributeError:
self.fail("Driver doesn't define apilevel")
def test_threadsafety(self):
try:
# Must exist
threadsafety = self.driver.threadsafety
# Must be a valid value
self.assertTrue(threadsafety in (0,1,2,3))
except AttributeError:
self.fail("Driver doesn't define threadsafety")
def test_paramstyle(self):
try:
# Must exist
paramstyle = self.driver.paramstyle
# Must be a valid value
self.assertTrue(paramstyle in (
'qmark','numeric','named','format','pyformat'
))
except AttributeError:
self.fail("Driver doesn't define paramstyle")
def test_Exceptions(self):
# Make sure required exceptions exist, and are in the
        # defined hierarchy.
self.assertTrue(issubclass(self.driver.Warning,Exception))
self.assertTrue(issubclass(self.driver.Error,Exception))
self.assertTrue(
issubclass(self.driver.InterfaceError,self.driver.Error)
)
self.assertTrue(
issubclass(self.driver.DatabaseError,self.driver.Error)
)
self.assertTrue(
issubclass(self.driver.OperationalError,self.driver.Error)
)
self.assertTrue(
issubclass(self.driver.IntegrityError,self.driver.Error)
)
self.assertTrue(
issubclass(self.driver.InternalError,self.driver.Error)
)
self.assertTrue(
issubclass(self.driver.ProgrammingError,self.driver.Error)
)
self.assertTrue(
issubclass(self.driver.NotSupportedError,self.driver.Error)
)
def test_ExceptionsAsConnectionAttributes(self):
# OPTIONAL EXTENSION
# Test for the optional DB API 2.0 extension, where the exceptions
# are exposed as attributes on the Connection object
# I figure this optional extension will be implemented by any
# driver author who is using this test suite, so it is enabled
# by default.
con = self._connect()
drv = self.driver
self.assertTrue(con.Warning is drv.Warning)
self.assertTrue(con.Error is drv.Error)
self.assertTrue(con.InterfaceError is drv.InterfaceError)
self.assertTrue(con.DatabaseError is drv.DatabaseError)
self.assertTrue(con.OperationalError is drv.OperationalError)
self.assertTrue(con.IntegrityError is drv.IntegrityError)
self.assertTrue(con.InternalError is drv.InternalError)
self.assertTrue(con.ProgrammingError is drv.ProgrammingError)
self.assertTrue(con.NotSupportedError is drv.NotSupportedError)
def test_commit(self):
con = self._connect()
try:
# Commit must work, even if it doesn't do anything
con.commit()
finally:
con.close()
def test_rollback(self):
con = self._connect()
# If rollback is defined, it should either work or throw
# the documented exception
if hasattr(con,'rollback'):
try:
con.rollback()
except self.driver.NotSupportedError:
pass
def test_cursor(self):
con = self._connect()
try:
cur = con.cursor()
finally:
con.close()
def test_cursor_isolation(self):
con = self._connect()
try:
# Make sure cursors created from the same connection have
# the documented transaction isolation level
cur1 = con.cursor()
cur2 = con.cursor()
self.executeDDL1(cur1)
cur1.execute("insert into %sbooze values ('Victoria Bitter')" % (
self.table_prefix
))
cur2.execute("select name from %sbooze" % self.table_prefix)
booze = cur2.fetchall()
self.assertEqual(len(booze),1)
self.assertEqual(len(booze[0]),1)
self.assertEqual(booze[0][0],'Victoria Bitter')
finally:
con.close()
def test_description(self):
con = self._connect()
try:
cur = con.cursor()
self.executeDDL1(cur)
self.assertEqual(cur.description,None,
                'cursor.description should be None after executing a '
'statement that can return no rows (such as DDL)'
)
cur.execute('select name from %sbooze' % self.table_prefix)
self.assertEqual(len(cur.description),1,
'cursor.description describes too many columns'
)
self.assertEqual(len(cur.description[0]),7,
'cursor.description[x] tuples must have 7 elements'
)
self.assertEqual(cur.description[0][0].lower(),'name',
'cursor.description[x][0] must return column name'
)
self.assertEqual(cur.description[0][1],self.driver.STRING,
'cursor.description[x][1] must return column type. Got %r'
% cur.description[0][1]
)
# Make sure self.description gets reset
self.executeDDL2(cur)
self.assertEqual(cur.description,None,
'cursor.description not being set to None when executing '
'no-result statements (eg. DDL)'
)
finally:
con.close()
def test_rowcount(self):
con = self._connect()
try:
cur = con.cursor()
self.executeDDL1(cur)
self.assertEqual(cur.rowcount,-1,
'cursor.rowcount should be -1 after executing no-result '
'statements'
)
cur.execute("insert into %sbooze values ('Victoria Bitter')" % (
self.table_prefix
))
self.assertTrue(cur.rowcount in (-1,1),
                'cursor.rowcount should == number of rows inserted, or '
'set to -1 after executing an insert statement'
)
cur.execute("select name from %sbooze" % self.table_prefix)
self.assertTrue(cur.rowcount in (-1,1),
'cursor.rowcount should == number of rows returned, or '
'set to -1 after executing a select statement'
)
self.executeDDL2(cur)
self.assertEqual(cur.rowcount,-1,
'cursor.rowcount not being reset to -1 after executing '
'no-result statements'
)
finally:
con.close()
    lower_func = 'lower' # Name of stored procedure to convert string->lowercase
def test_callproc(self):
con = self._connect()
try:
cur = con.cursor()
if self.lower_func and hasattr(cur,'callproc'):
r = cur.callproc(self.lower_func,('FOO',))
self.assertEqual(len(r),1)
self.assertEqual(r[0],'FOO')
r = cur.fetchall()
self.assertEqual(len(r),1,'callproc produced no result set')
self.assertEqual(len(r[0]),1,
'callproc produced invalid result set'
)
self.assertEqual(r[0][0],'foo',
'callproc produced invalid results'
)
finally:
con.close()
def test_close(self):
con = self._connect()
try:
cur = con.cursor()
finally:
con.close()
        # cursor.execute should raise an Error if called after the
        # connection is closed
        self.assertRaises(self.driver.Error,self.executeDDL1,cur)
        # connection.commit should raise an Error if called after the
        # connection is closed
        self.assertRaises(self.driver.Error,con.commit)
        # connection.close should raise an Error if called more than once
        self.assertRaises(self.driver.Error,con.close)
def test_execute(self):
con = self._connect()
try:
cur = con.cursor()
self._paraminsert(cur)
finally:
con.close()
def _paraminsert(self,cur):
self.executeDDL1(cur)
cur.execute("insert into %sbooze values ('Victoria Bitter')" % (
self.table_prefix
))
self.assertTrue(cur.rowcount in (-1,1))
if self.driver.paramstyle == 'qmark':
cur.execute(
'insert into %sbooze values (?)' % self.table_prefix,
("Cooper's",)
)
elif self.driver.paramstyle == 'numeric':
cur.execute(
'insert into %sbooze values (:1)' % self.table_prefix,
("Cooper's",)
)
elif self.driver.paramstyle == 'named':
cur.execute(
'insert into %sbooze values (:beer)' % self.table_prefix,
{'beer':"Cooper's"}
)
elif self.driver.paramstyle == 'format':
cur.execute(
'insert into %sbooze values (%%s)' % self.table_prefix,
("Cooper's",)
)
elif self.driver.paramstyle == 'pyformat':
cur.execute(
'insert into %sbooze values (%%(beer)s)' % self.table_prefix,
{'beer':"Cooper's"}
)
else:
self.fail('Invalid paramstyle')
self.assertTrue(cur.rowcount in (-1,1))
cur.execute('select name from %sbooze' % self.table_prefix)
res = cur.fetchall()
self.assertEqual(len(res),2,'cursor.fetchall returned too few rows')
beers = [res[0][0],res[1][0]]
beers.sort()
self.assertEqual(beers[0],"Cooper's",
'cursor.fetchall retrieved incorrect data, or data inserted '
'incorrectly'
)
self.assertEqual(beers[1],"Victoria Bitter",
'cursor.fetchall retrieved incorrect data, or data inserted '
'incorrectly'
)
def test_executemany(self):
con = self._connect()
try:
cur = con.cursor()
self.executeDDL1(cur)
largs = [ ("Cooper's",) , ("Boag's",) ]
margs = [ {'beer': "Cooper's"}, {'beer': "Boag's"} ]
if self.driver.paramstyle == 'qmark':
cur.executemany(
'insert into %sbooze values (?)' % self.table_prefix,
largs
)
elif self.driver.paramstyle == 'numeric':
cur.executemany(
'insert into %sbooze values (:1)' % self.table_prefix,
largs
)
elif self.driver.paramstyle == 'named':
cur.executemany(
'insert into %sbooze values (:beer)' % self.table_prefix,
margs
)
elif self.driver.paramstyle == 'format':
cur.executemany(
'insert into %sbooze values (%%s)' % self.table_prefix,
largs
)
elif self.driver.paramstyle == 'pyformat':
cur.executemany(
'insert into %sbooze values (%%(beer)s)' % (
self.table_prefix
),
margs
)
else:
self.fail('Unknown paramstyle')
self.assertTrue(cur.rowcount in (-1,2),
'insert using cursor.executemany set cursor.rowcount to '
'incorrect value %r' % cur.rowcount
)
cur.execute('select name from %sbooze' % self.table_prefix)
res = cur.fetchall()
self.assertEqual(len(res),2,
'cursor.fetchall retrieved incorrect number of rows'
)
beers = [res[0][0],res[1][0]]
beers.sort()
self.assertEqual(beers[0],"Boag's",'incorrect data retrieved')
self.assertEqual(beers[1],"Cooper's",'incorrect data retrieved')
finally:
con.close()
def test_fetchone(self):
con = self._connect()
try:
cur = con.cursor()
# cursor.fetchone should raise an Error if called before
# executing a select-type query
self.assertRaises(self.driver.Error,cur.fetchone)
# cursor.fetchone should raise an Error if called after
            # executing a statement that cannot return rows
self.executeDDL1(cur)
self.assertRaises(self.driver.Error,cur.fetchone)
cur.execute('select name from %sbooze' % self.table_prefix)
self.assertEqual(cur.fetchone(),None,
'cursor.fetchone should return None if a query retrieves '
'no rows'
)
self.assertTrue(cur.rowcount in (-1,0))
            # cursor.fetchone should raise an Error if called after
            # executing a statement that cannot return rows
cur.execute("insert into %sbooze values ('Victoria Bitter')" % (
self.table_prefix
))
self.assertRaises(self.driver.Error,cur.fetchone)
cur.execute('select name from %sbooze' % self.table_prefix)
r = cur.fetchone()
self.assertEqual(len(r),1,
'cursor.fetchone should have retrieved a single row'
)
self.assertEqual(r[0],'Victoria Bitter',
'cursor.fetchone retrieved incorrect data'
)
self.assertEqual(cur.fetchone(),None,
'cursor.fetchone should return None if no more rows available'
)
self.assertTrue(cur.rowcount in (-1,1))
finally:
con.close()
samples = [
'Carlton Cold',
'Carlton Draft',
'Mountain Goat',
'Redback',
'Victoria Bitter',
'XXXX'
]
def _populate(self):
''' Return a list of sql commands to setup the DB for the fetch
tests.
'''
populate = [
"insert into %sbooze values ('%s')" % (self.table_prefix,s)
for s in self.samples
]
return populate
def test_fetchmany(self):
con = self._connect()
try:
cur = con.cursor()
# cursor.fetchmany should raise an Error if called without
            # issuing a query
self.assertRaises(self.driver.Error,cur.fetchmany,4)
self.executeDDL1(cur)
for sql in self._populate():
cur.execute(sql)
cur.execute('select name from %sbooze' % self.table_prefix)
r = cur.fetchmany()
self.assertEqual(len(r),1,
'cursor.fetchmany retrieved incorrect number of rows, '
'default of arraysize is one.'
)
cur.arraysize=10
r = cur.fetchmany(3) # Should get 3 rows
self.assertEqual(len(r),3,
'cursor.fetchmany retrieved incorrect number of rows'
)
r = cur.fetchmany(4) # Should get 2 more
self.assertEqual(len(r),2,
'cursor.fetchmany retrieved incorrect number of rows'
)
r = cur.fetchmany(4) # Should be an empty sequence
self.assertEqual(len(r),0,
'cursor.fetchmany should return an empty sequence after '
'results are exhausted'
)
self.assertTrue(cur.rowcount in (-1,6))
# Same as above, using cursor.arraysize
cur.arraysize=4
cur.execute('select name from %sbooze' % self.table_prefix)
r = cur.fetchmany() # Should get 4 rows
self.assertEqual(len(r),4,
'cursor.arraysize not being honoured by fetchmany'
)
r = cur.fetchmany() # Should get 2 more
self.assertEqual(len(r),2)
r = cur.fetchmany() # Should be an empty sequence
self.assertEqual(len(r),0)
self.assertTrue(cur.rowcount in (-1,6))
cur.arraysize=6
cur.execute('select name from %sbooze' % self.table_prefix)
rows = cur.fetchmany() # Should get all rows
self.assertTrue(cur.rowcount in (-1,6))
self.assertEqual(len(rows),6)
rows = [r[0] for r in rows]
rows.sort()
# Make sure we get the right data back out
for i in range(0,6):
self.assertEqual(rows[i],self.samples[i],
'incorrect data retrieved by cursor.fetchmany'
)
rows = cur.fetchmany() # Should return an empty list
self.assertEqual(len(rows),0,
'cursor.fetchmany should return an empty sequence if '
'called after the whole result set has been fetched'
)
self.assertTrue(cur.rowcount in (-1,6))
self.executeDDL2(cur)
cur.execute('select name from %sbarflys' % self.table_prefix)
r = cur.fetchmany() # Should get empty sequence
self.assertEqual(len(r),0,
'cursor.fetchmany should return an empty sequence if '
'query retrieved no rows'
)
self.assertTrue(cur.rowcount in (-1,0))
finally:
con.close()
def test_fetchall(self):
con = self._connect()
try:
cur = con.cursor()
# cursor.fetchall should raise an Error if called
# without executing a query that may return rows (such
# as a select)
self.assertRaises(self.driver.Error, cur.fetchall)
self.executeDDL1(cur)
for sql in self._populate():
cur.execute(sql)
# cursor.fetchall should raise an Error if called
            # after executing a statement that cannot return rows
self.assertRaises(self.driver.Error,cur.fetchall)
cur.execute('select name from %sbooze' % self.table_prefix)
rows = cur.fetchall()
self.assertTrue(cur.rowcount in (-1,len(self.samples)))
self.assertEqual(len(rows),len(self.samples),
'cursor.fetchall did not retrieve all rows'
)
rows = [r[0] for r in rows]
rows.sort()
for i in range(0,len(self.samples)):
self.assertEqual(rows[i],self.samples[i],
'cursor.fetchall retrieved incorrect rows'
)
rows = cur.fetchall()
self.assertEqual(
len(rows),0,
'cursor.fetchall should return an empty list if called '
'after the whole result set has been fetched'
)
self.assertTrue(cur.rowcount in (-1,len(self.samples)))
self.executeDDL2(cur)
cur.execute('select name from %sbarflys' % self.table_prefix)
rows = cur.fetchall()
self.assertTrue(cur.rowcount in (-1,0))
self.assertEqual(len(rows),0,
'cursor.fetchall should return an empty list if '
'a select query returns no rows'
)
finally:
con.close()
def test_mixedfetch(self):
con = self._connect()
try:
cur = con.cursor()
self.executeDDL1(cur)
for sql in self._populate():
cur.execute(sql)
cur.execute('select name from %sbooze' % self.table_prefix)
rows1 = cur.fetchone()
rows23 = cur.fetchmany(2)
rows4 = cur.fetchone()
rows56 = cur.fetchall()
self.assertTrue(cur.rowcount in (-1,6))
self.assertEqual(len(rows23),2,
'fetchmany returned incorrect number of rows'
)
self.assertEqual(len(rows56),2,
'fetchall returned incorrect number of rows'
)
rows = [rows1[0]]
rows.extend([rows23[0][0],rows23[1][0]])
rows.append(rows4[0])
rows.extend([rows56[0][0],rows56[1][0]])
rows.sort()
for i in range(0,len(self.samples)):
self.assertEqual(rows[i],self.samples[i],
'incorrect data retrieved or inserted'
)
finally:
con.close()
def help_nextset_setUp(self,cur):
''' Should create a procedure called deleteme
that returns two result sets, first the
number of rows in booze then "name from booze"
'''
raise NotImplementedError('Helper not implemented')
#sql="""
# create procedure deleteme as
# begin
# select count(*) from booze
# select name from booze
# end
#"""
#cur.execute(sql)
def help_nextset_tearDown(self,cur):
'If cleaning up is needed after nextSetTest'
raise NotImplementedError('Helper not implemented')
#cur.execute("drop procedure deleteme")
def test_nextset(self):
con = self._connect()
try:
cur = con.cursor()
if not hasattr(cur,'nextset'):
return
try:
self.executeDDL1(cur)
                for sql in self._populate():
                    cur.execute(sql)
self.help_nextset_setUp(cur)
cur.callproc('deleteme')
numberofrows=cur.fetchone()
assert numberofrows[0]== len(self.samples)
assert cur.nextset()
names=cur.fetchall()
assert len(names) == len(self.samples)
s=cur.nextset()
                assert s is None, 'No more result sets, should return None'
finally:
self.help_nextset_tearDown(cur)
finally:
con.close()
def test_nextset(self):
raise NotImplementedError('Drivers need to override this test')
def test_arraysize(self):
# Not much here - rest of the tests for this are in test_fetchmany
con = self._connect()
try:
cur = con.cursor()
self.assertTrue(hasattr(cur,'arraysize'),
'cursor.arraysize must be defined'
)
finally:
con.close()
def test_setinputsizes(self):
con = self._connect()
try:
cur = con.cursor()
cur.setinputsizes( (25,) )
self._paraminsert(cur) # Make sure cursor still works
finally:
con.close()
def test_setoutputsize_basic(self):
# Basic test is to make sure setoutputsize doesn't blow up
con = self._connect()
try:
cur = con.cursor()
cur.setoutputsize(1000)
cur.setoutputsize(2000,0)
self._paraminsert(cur) # Make sure the cursor still works
finally:
con.close()
def test_setoutputsize(self):
# Real test for setoutputsize is driver dependant
        raise NotImplementedError('Drivers need to override this test')
def test_None(self):
con = self._connect()
try:
cur = con.cursor()
self.executeDDL1(cur)
cur.execute('insert into %sbooze values (NULL)' % self.table_prefix)
cur.execute('select name from %sbooze' % self.table_prefix)
r = cur.fetchall()
self.assertEqual(len(r),1)
self.assertEqual(len(r[0]),1)
self.assertEqual(r[0][0],None,'NULL value not returned as None')
finally:
con.close()
def test_Date(self):
d1 = self.driver.Date(2002,12,25)
d2 = self.driver.DateFromTicks(time.mktime((2002,12,25,0,0,0,0,0,0)))
# Can we assume this? API doesn't specify, but it seems implied
# self.assertEqual(str(d1),str(d2))
def test_Time(self):
t1 = self.driver.Time(13,45,30)
t2 = self.driver.TimeFromTicks(time.mktime((2001,1,1,13,45,30,0,0,0)))
# Can we assume this? API doesn't specify, but it seems implied
# self.assertEqual(str(t1),str(t2))
def test_Timestamp(self):
t1 = self.driver.Timestamp(2002,12,25,13,45,30)
t2 = self.driver.TimestampFromTicks(
time.mktime((2002,12,25,13,45,30,0,0,0))
)
# Can we assume this? API doesn't specify, but it seems implied
# self.assertEqual(str(t1),str(t2))
def test_Binary(self):
b = self.driver.Binary('Something')
b = self.driver.Binary('')
def test_STRING(self):
self.assertTrue(hasattr(self.driver,'STRING'),
'module.STRING must be defined'
)
def test_BINARY(self):
self.assertTrue(hasattr(self.driver,'BINARY'),
'module.BINARY must be defined.'
)
def test_NUMBER(self):
self.assertTrue(hasattr(self.driver,'NUMBER'),
'module.NUMBER must be defined.'
)
def test_DATETIME(self):
self.assertTrue(hasattr(self.driver,'DATETIME'),
'module.DATETIME must be defined.'
)
def test_ROWID(self):
self.assertTrue(hasattr(self.driver,'ROWID'),
'module.ROWID must be defined.'
)
|
cogeorg/black_rhino | refs/heads/master | examples/firesales_SA/networkx/generators/hybrid.py | 47 | """
Hybrid
"""
__author__ = """Aric Hagberg (hagberg@lanl.gov)\nDan Schult (dschult@colgate.edu)"""
# Copyright (C) 2004-2008 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
__all__ = ['kl_connected_subgraph', 'is_kl_connected']
import copy
import networkx as nx
def kl_connected_subgraph(G,k,l,low_memory=False,same_as_graph=False):
""" Returns the maximum locally (k,l) connected subgraph of G.
(k,l)-connected subgraphs are presented by Fan Chung and Li
in "The Small World Phenomenon in hybrid power law graphs"
to appear in "Complex Networks" (Ed. E. Ben-Naim) Lecture
Notes in Physics, Springer (2004)
    If low_memory=True, a slightly slower but lower-memory version is used.
    If same_as_graph=True, a tuple (subgraph, flag) is returned, where the
    flag indicates whether G itself is (k,l)-connected.
"""
H=copy.deepcopy(G) # subgraph we construct by removing from G
graphOK=True
deleted_some=True # hack to start off the while loop
while deleted_some:
deleted_some=False
for edge in H.edges():
(u,v)=edge
### Get copy of graph needed for this search
if low_memory:
verts=set([u,v])
for i in range(k):
                    for w in verts.copy():
                        verts.update(G.neighbors(w))
G2=G.subgraph(list(verts))
else:
G2=copy.deepcopy(G)
###
path=[u,v]
cnt=0
accept=0
while path:
cnt += 1 # Found a path
if cnt>=l:
accept=1
break
                # remove the edges along this path from G2 before
                # searching for the next (edge-disjoint) path
prev=u
for w in path:
if prev!=w:
G2.remove_edge(prev,w)
prev=w
# path=shortest_path(G2,u,v,k) # ??? should "Cutoff" be k+1?
try:
path=nx.shortest_path(G2,u,v) # ??? should "Cutoff" be k+1?
except nx.NetworkXNoPath:
path = False
# No Other Paths
if accept==0:
H.remove_edge(u,v)
deleted_some=True
if graphOK: graphOK=False
# We looked through all edges and removed none of them.
# So, H is the maximal (k,l)-connected subgraph of G
if same_as_graph:
return (H,graphOK)
return H
def is_kl_connected(G,k,l,low_memory=False):
"""Returns True if G is kl connected."""
graphOK=True
for edge in G.edges():
(u,v)=edge
### Get copy of graph needed for this search
if low_memory:
verts=set([u,v])
for i in range(k):
                for w in verts.copy():
                    verts.update(G.neighbors(w))
G2=G.subgraph(verts)
else:
G2=copy.deepcopy(G)
###
path=[u,v]
cnt=0
accept=0
while path:
cnt += 1 # Found a path
if cnt>=l:
accept=1
break
            # remove the edges along this path from G2 before
            # searching for the next (edge-disjoint) path
prev=u
for w in path:
if w!=prev:
G2.remove_edge(prev,w)
prev=w
# path=shortest_path(G2,u,v,k) # ??? should "Cutoff" be k+1?
try:
path=nx.shortest_path(G2,u,v) # ??? should "Cutoff" be k+1?
except nx.NetworkXNoPath:
path = False
# No Other Paths
if accept==0:
graphOK=False
break
# return status
return graphOK
|
iDTLabssl/kitsune | refs/heads/master | kitsune/messages/tests/test_views.py | 15 | from multidb.middleware import PINNING_COOKIE
from nose.tools import eq_
from kitsune.messages.models import InboxMessage, OutboxMessage
from kitsune.sumo.tests import TestCase, LocalizingClient
from kitsune.sumo.urlresolvers import reverse
from kitsune.users.tests import user
class ReadMessageTests(TestCase):
def setUp(self):
super(ReadMessageTests, self).setUp()
self.user1 = user(save=True)
self.user2 = user(save=True)
self.client.login(username=self.user1.username, password='testpass')
def test_mark_bulk_message_read(self):
i = InboxMessage.objects.create(sender=self.user2, to=self.user1,
message='foo')
assert not i.read
j = InboxMessage.objects.create(sender=self.user2, to=self.user1,
message='foo')
assert not j.read
url = reverse('messages.bulk_action', locale='en-US')
resp = self.client.post(url,
{'id': [i.pk, j.pk], 'mark_read': True},
follow=True)
eq_(200, resp.status_code)
assert InboxMessage.objects.get(pk=i.pk).read
assert InboxMessage.objects.get(pk=j.pk).read
def test_mark_bulk_read_none(self):
url = reverse('messages.bulk_action', locale='en-US')
resp = self.client.post(
url, {'id': [], 'mark_read': True}, follow=True)
self.assertContains(resp, 'No messages selected')
def test_mark_message_read(self):
i = InboxMessage.objects.create(sender=self.user2, to=self.user1,
message='foo')
assert not i.read
resp = self.client.get(reverse('messages.read', args=[i.pk]),
follow=True)
eq_(200, resp.status_code)
assert InboxMessage.objects.get(pk=i.pk).read
assert PINNING_COOKIE in resp.cookies
def test_unread_does_not_pin(self):
i = InboxMessage.objects.create(sender=self.user2, to=self.user1,
message='foo', read=True)
assert i.read
resp = self.client.get(reverse('messages.read', args=[i.pk]),
follow=True)
eq_(200, resp.status_code)
assert InboxMessage.objects.get(pk=i.pk).read
assert PINNING_COOKIE not in resp.cookies
def test_mark_message_replied(self):
i = InboxMessage.objects.create(sender=self.user2, to=self.user1,
message='foo')
assert not i.replied
self.client.post(reverse('messages.new', locale='en-US'),
{'to': self.user2.username, 'message': 'bar',
'in_reply_to': i.pk})
assert InboxMessage.objects.get(pk=i.pk).replied
class DeleteMessageTests(TestCase):
def setUp(self):
super(DeleteMessageTests, self).setUp()
self.user1 = user(save=True)
self.user2 = user(save=True)
self.client.login(username=self.user1.username, password='testpass')
def test_delete_inbox_message(self):
i = InboxMessage.objects.create(sender=self.user2, to=self.user1,
message='foo')
eq_(1, InboxMessage.objects.count())
resp = self.client.post(reverse('messages.delete', args=[i.pk],
locale='en-US'),
{'confirmed': True},
follow=True)
eq_(200, resp.status_code)
eq_(0, InboxMessage.objects.count())
def test_delete_many_message(self):
i = InboxMessage.objects.create(to=self.user1, sender=self.user2,
message='foo')
j = InboxMessage.objects.create(to=self.user1, sender=self.user2,
message='foo')
eq_(2, InboxMessage.objects.count())
url = reverse('messages.bulk_action', locale='en-US')
resp = self.client.post(url, {'id': [i.pk, j.pk],
'delete': True,
'confirmed': True},
follow=True)
eq_(200, resp.status_code)
eq_(0, InboxMessage.objects.count())
def test_delete_outbox_message(self):
i = OutboxMessage.objects.create(sender=self.user1, message='foo')
i.to.add(self.user2)
eq_(1, OutboxMessage.objects.count())
resp = self.client.post(reverse('messages.delete_outbox', args=[i.pk],
locale='en-US'),
{'confirmed': True}, follow=True)
eq_(200, resp.status_code)
eq_(0, OutboxMessage.objects.count())
def test_bulk_delete_none(self):
url = reverse('messages.bulk_action', locale='en-US')
resp = self.client.post(url, {'id': [], 'delete': True}, follow=True)
self.assertContains(resp, 'No messages selected')
class OutboxTests(TestCase):
client_class = LocalizingClient
def setUp(self):
super(OutboxTests, self).setUp()
self.user1 = user(save=True)
self.user2 = user(save=True)
self.client.login(username=self.user1.username, password='testpass')
def test_message_without_recipients(self):
OutboxMessage.objects.create(sender=self.user1, message='foo')
eq_(1, OutboxMessage.objects.count())
resp = self.client.post(reverse('messages.outbox'), follow=True)
eq_(200, resp.status_code)
def test_delete_many_outbox_message(self):
i = OutboxMessage.objects.create(sender=self.user1, message='foo')
i.to.add(self.user2)
j = OutboxMessage.objects.create(sender=self.user1, message='foo')
j.to.add(self.user2)
eq_(2, OutboxMessage.objects.count())
url = reverse('messages.outbox_bulk_action', locale='en-US')
resp = self.client.post(url, {'id': [i.pk, j.pk],
'delete': True,
'confirmed': True},
follow=True)
eq_(200, resp.status_code)
eq_(0, OutboxMessage.objects.count())
|
thdb-theo/Zombie-Survival | refs/heads/master | tests/test_baseclass.py | 1 | import unittest
import sys
import os
sys.path.insert(0, os.getcwd() + "/src")
from baseclass import BaseClass
from tile import Tile
from maths import Vector
class BaseClassTest(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        Tile.create()
    @classmethod
    def tearDownClass(cls):
        Tile.delete()
def test_creation(self):
a = BaseClass(3, 2)
b = BaseClass(2, 6, 9, 3)
self.assertEqual(a.width, Tile.length)
self.assertNotEqual(b.width, Tile.length)
self.assertIsNone(b.to)
self.assertIsInstance(a.pos, Vector)
self.assertIsInstance(a._size, Vector)
self.assertEqual(a._size, (a.width, a.height))
def test_funcs(self):
a = BaseClass(x=1, y=1, width=2, height=2)
self.assertEqual(a.centre, (2, 2))
self.assertIsInstance(a.centre, Vector)
b = BaseClass(x=0, y=0)
self.assertEqual(b.get_number(), 0)
self.assertIs(b.get_tile(), Tile.instances[0])
if __name__ == "__main__":
unittest.main() |
cjaymes/pyscap | refs/heads/master | src/scap/model/oval_5/defs/GlobToRegexFunctionType.py | 1 | # Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.
import logging
from scap.Model import Model
from scap.model.oval_5.defs import ELEMENT_GROUP_COMPONENT
logger = logging.getLogger(__name__)
class GlobToRegexFunctionType(Model):
MODEL_MAP = {
'elements': [
],
'attributes': {
'glob_noescape': {'type': 'BooleanType', 'default': False},
}
}
MODEL_MAP['elements'].extend(ELEMENT_GROUP_COMPONENT)
|
fungxu/Arduino | refs/heads/esp8266 | arduino-core/src/processing/app/i18n/python/requests/packages/charade/utf8prober.py | 2918 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
from .mbcssm import UTF8SMModel
ONE_CHAR_PROB = 0.5
class UTF8Prober(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(UTF8SMModel)
self.reset()
def reset(self):
CharSetProber.reset(self)
self._mCodingSM.reset()
self._mNumOfMBChar = 0
def get_charset_name(self):
return "utf-8"
def feed(self, aBuf):
for c in aBuf:
codingState = self._mCodingSM.next_state(c)
if codingState == constants.eError:
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
if self._mCodingSM.get_current_charlen() >= 2:
self._mNumOfMBChar += 1
if self.get_state() == constants.eDetecting:
if self.get_confidence() > constants.SHORTCUT_THRESHOLD:
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
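        # Confidence heuristic: each observed multi-byte sequence halves the
        # remaining probability that the input is *not* UTF-8, starting from
        # 0.99, i.e. confidence = 1 - 0.99 * ONE_CHAR_PROB ** n for n < 6
        # multi-byte chars; e.g. n = 3 gives 1 - 0.99 * 0.125 ~= 0.876.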
unlike = 0.99
if self._mNumOfMBChar < 6:
for i in range(0, self._mNumOfMBChar):
unlike = unlike * ONE_CHAR_PROB
return 1.0 - unlike
else:
return unlike
|
lucafavatella/intellij-community | refs/heads/cli-wip | python/testData/refactoring/move/qualifiedImport/after/src/c.py | 79 | import a
import b
def main():
print(b.f(42))
|
tiagochiavericosta/edx-platform | refs/heads/master | common/djangoapps/course_modes/migrations/0004_auto__add_field_coursemode_expiration_date.py | 114 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'CourseMode.expiration_date'
db.add_column('course_modes_coursemode', 'expiration_date',
self.gf('django.db.models.fields.DateField')(default=None, null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'CourseMode.expiration_date'
db.delete_column('course_modes_coursemode', 'expiration_date')
models = {
'course_modes.coursemode': {
'Meta': {'unique_together': "(('course_id', 'mode_slug', 'currency'),)", 'object_name': 'CourseMode'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'expiration_date': ('django.db.models.fields.DateField', [], {'default': 'None', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'min_price': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'mode_display_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'mode_slug': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'suggested_prices': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': "''", 'max_length': '255', 'blank': 'True'})
}
}
complete_apps = ['course_modes']
|
Kortemme-Lab/klab | refs/heads/master | klab/bio/openbabel_util.py | 1 | import openbabel as ob
def read_molecules(filepath, single = False):
in_format = filepath.strip().split( '.' )[-1]
obconversion = ob.OBConversion()
obconversion.SetInFormat( in_format )
obmol = ob.OBMol()
molecules = []
notatend = obconversion.ReadFile( obmol, filepath )
while notatend:
molecules.append( obmol )
obmol = ob.OBMol()
notatend = obconversion.Read( obmol )
if single:
assert( len(molecules) == 1 )
return molecules[0]
else:
return molecules
def write_molecule(mol, path, output_format = None):
if output_format == None:
output_format = path.strip().split('.')[-1]
obconversion = ob.OBConversion()
obconversion.SetOutFormat(output_format)
with open(path, 'w') as f:
f.write( obconversion.WriteString(mol) )
|
lakshayg/tensorflow | refs/heads/master | tensorflow/python/kernel_tests/conv2d_transpose_test.py | 22 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for convolution related functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.client import device_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class Conv2DTransposeTest(test.TestCase):
def testConv2DTransposeSingleStride(self):
with self.test_session():
strides = [1, 1, 1, 1]
# Input, output: [batch, height, width, depth]
x_shape = [2, 6, 4, 3]
y_shape = [2, 6, 4, 2]
# Filter: [kernel_height, kernel_width, output_depth, input_depth]
f_shape = [3, 3, 2, 3]
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv2d_transpose(
x, f, y_shape, strides=strides, padding="SAME")
value = output.eval()
# We count the number of cells being added at the locations in the output.
# At the center, #cells=kernel_height * kernel_width
# At the corners, #cells=ceil(kernel_height/2) * ceil(kernel_width/2)
# At the borders, #cells=ceil(kernel_height/2)*kernel_width or
# kernel_height * ceil(kernel_width/2)
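      # Worked example: an interior cell is covered by all 3*3 filter taps,
      # and each tap sums 3 input channels of ones, so the expected value is
      # 4*3.0 + 5*3.0 = 27.0; a corner sees only 2*2 taps -> 12.0.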
for n in xrange(x_shape[0]):
for k in xrange(f_shape[2]):
for w in xrange(y_shape[2]):
for h in xrange(y_shape[1]):
target = 4 * 3.0
h_in = h > 0 and h < y_shape[1] - 1
w_in = w > 0 and w < y_shape[2] - 1
if h_in and w_in:
target += 5 * 3.0
elif h_in or w_in:
target += 2 * 3.0
self.assertAllClose(target, value[n, h, w, k])
def testConv2DTransposeSame(self):
with self.test_session():
strides = [1, 2, 2, 1]
# Input, output: [batch, height, width, depth]
x_shape = [2, 6, 4, 3]
y_shape = [2, 12, 8, 2]
# Filter: [kernel_height, kernel_width, output_depth, input_depth]
f_shape = [3, 3, 2, 3]
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv2d_transpose(
x, f, y_shape, strides=strides, padding="SAME")
value = output.eval()
for n in xrange(x_shape[0]):
for k in xrange(f_shape[2]):
for w in xrange(y_shape[2]):
for h in xrange(y_shape[1]):
target = 3.0
# We add a case for locations divisible by the stride.
h_in = h % strides[1] == 0 and h > 0 and h < y_shape[1] - 1
w_in = w % strides[2] == 0 and w > 0 and w < y_shape[2] - 1
if h_in and w_in:
target += 9.0
elif h_in or w_in:
target += 3.0
self.assertAllClose(target, value[n, h, w, k])
def testConv2DTransposeValid(self):
with self.test_session():
strides = [1, 2, 2, 1]
# Input, output: [batch, height, width, depth]
x_shape = [2, 6, 4, 3]
y_shape = [2, 13, 9, 2]
# Filter: [kernel_height, kernel_width, output_depth, input_depth]
f_shape = [3, 3, 2, 3]
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv2d_transpose(
x, f, y_shape, strides=strides, padding="VALID")
value = output.eval()
cache_values = np.zeros(y_shape, dtype=np.float32)
# The amount of padding added
pad = 1
for n in xrange(x_shape[0]):
for k in xrange(f_shape[2]):
for w in xrange(pad, y_shape[2] - pad):
for h in xrange(pad, y_shape[1] - pad):
target = 3.0
# We add a case for locations divisible by the stride.
h_in = h % strides[1] == 0 and h > pad and h < y_shape[
1] - 1 - pad
w_in = w % strides[2] == 0 and w > pad and w < y_shape[
2] - 1 - pad
if h_in and w_in:
target += 9.0
elif h_in or w_in:
target += 3.0
cache_values[n, h, w, k] = target
# copy values in the border
cache_values[n, :, 0, k] = cache_values[n, :, 1, k]
cache_values[n, :, -1, k] = cache_values[n, :, -2, k]
cache_values[n, 0, :, k] = cache_values[n, 1, :, k]
cache_values[n, -1, :, k] = cache_values[n, -2, :, k]
self.assertAllClose(cache_values, value)
def testGradient(self):
x_shape = [2, 6, 4, 3]
f_shape = [3, 3, 2, 3]
y_shape = [2, 12, 8, 2]
strides = [1, 2, 2, 1]
np.random.seed(1) # Make it reproducible.
x_val = np.random.random_sample(x_shape).astype(np.float64)
f_val = np.random.random_sample(f_shape).astype(np.float64)
with self.test_session():
x = constant_op.constant(x_val, name="x", dtype=dtypes.float32)
f = constant_op.constant(f_val, name="f", dtype=dtypes.float32)
output = nn_ops.conv2d_transpose(
x, f, y_shape, strides=strides, padding="SAME")
err = gradient_checker.compute_gradient_error([x, f], [x_shape, f_shape],
output, y_shape)
print("conv2d_transpose gradient err = %g " % err)
err_tolerance = 0.0005
self.assertLess(err, err_tolerance)
def testConv2DTransposeSingleStrideNCHW(self):
    # `NCHW` data format is only supported for CUDA devices.
if test.is_gpu_available(cuda_only=True):
with self.test_session(use_gpu=True):
strides = [1, 1, 1, 1]
        # Input, output: [batch, depth, height, width]
x_shape = [2, 3, 6, 4]
y_shape = [2, 2, 6, 4]
# Filter: [kernel_height, kernel_width, output_depth, input_depth]
f_shape = [3, 3, 2, 3]
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv2d_transpose(
x, f, y_shape, strides=strides, padding="SAME", data_format="NCHW")
value = output.eval()
for n in xrange(x_shape[0]):
for k in xrange(f_shape[2]):
for w in xrange(y_shape[3]):
for h in xrange(y_shape[2]):
target = 4 * 3.0
h_in = h > 0 and h < y_shape[2] - 1
w_in = w > 0 and w < y_shape[3] - 1
if h_in and w_in:
target += 5 * 3.0
elif h_in or w_in:
target += 2 * 3.0
self.assertAllClose(target, value[n, k, h, w])
def testConv2DTransposeSameNCHW(self):
    # `NCHW` data format is only supported for CUDA devices.
if test.is_gpu_available(cuda_only=True):
with self.test_session(use_gpu=True):
strides = [1, 1, 2, 2]
# Input, output: [batch, depth, height, width]
x_shape = [2, 3, 6, 4]
y_shape = [2, 2, 12, 8]
# Filter: [kernel_height, kernel_width, output_depth, input_depth]
f_shape = [3, 3, 2, 3]
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv2d_transpose(
x, f, y_shape, strides=strides, padding="SAME", data_format="NCHW")
value = output.eval()
for n in xrange(x_shape[0]):
for k in xrange(f_shape[2]):
for w in xrange(y_shape[3]):
for h in xrange(y_shape[2]):
target = 3.0
# We add a case for locations divisible by the stride.
h_in = h % strides[2] == 0 and h > 0 and h < y_shape[2] - 1
w_in = w % strides[3] == 0 and w > 0 and w < y_shape[3] - 1
if h_in and w_in:
target += 9.0
elif h_in or w_in:
target += 3.0
self.assertAllClose(target, value[n, k, h, w])
def testConv2DTransposeValidNCHW(self):
    # `NCHW` data format is only supported for CUDA devices.
if test.is_gpu_available(cuda_only=True):
with self.test_session(use_gpu=True):
strides = [1, 1, 2, 2]
# Input, output: [batch, depth, height, width]
x_shape = [2, 3, 6, 4]
y_shape = [2, 2, 13, 9]
# Filter: [kernel_height, kernel_width, output_depth, input_depth]
f_shape = [3, 3, 2, 3]
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv2d_transpose(
x, f, y_shape, strides=strides, padding="VALID", data_format="NCHW")
value = output.eval()
cache_values = np.zeros(y_shape, dtype=np.float32)
# The amount of padding added
pad = 1
for n in xrange(x_shape[0]):
for k in xrange(f_shape[2]):
for w in xrange(pad, y_shape[3] - pad):
for h in xrange(pad, y_shape[2] - pad):
target = 3.0
# We add a case for locations divisible by the stride.
h_in = h % strides[2] == 0 and h > pad and h < y_shape[
2] - 1 - pad
w_in = w % strides[3] == 0 and w > pad and w < y_shape[
3] - 1 - pad
if h_in and w_in:
target += 9.0
elif h_in or w_in:
target += 3.0
cache_values[n, k, h, w] = target
# copy values in the border
cache_values[n, k, :, 0] = cache_values[n, k, :, 1]
cache_values[n, k, :, -1] = cache_values[n, k, :, -2]
cache_values[n, k, 0, :] = cache_values[n, k, 1, :]
cache_values[n, k, -1, :] = cache_values[n, k, -2, :]
self.assertAllClose(cache_values, value)
def testConv2DTransposeShapeInference(self):
# Test case for 8972
initializer = random_ops.truncated_normal(
[3, 3, 5, 1], mean=0.0, stddev=0.01, dtype=dtypes.float32)
x = variables.Variable(random_ops.random_normal([3, 10, 5, 1]))
f = variable_scope.get_variable("f", initializer=initializer)
f_shape = array_ops.stack([array_ops.shape(x)[0], 10, 5, 5])
output = nn_ops.conv2d_transpose(
x, f, f_shape, strides=[1, 1, 1, 1], padding="SAME")
self.assertEqual(output.get_shape().as_list(), [None, 10, 5, 5])
if __name__ == "__main__":
test.main()
|
brianv0/sqlalchemy | refs/heads/master | lib/sqlalchemy/ext/baked.py | 40 | # sqlalchemy/ext/baked.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Baked query extension.
Provides a creational pattern for the :class:`.query.Query` object which
allows the fully constructed object, Core select statement, and string
compiled result to be fully cached.
"""
from ..orm.query import Query
from ..orm import strategies, attributes, properties, \
strategy_options, util as orm_util, interfaces
from .. import log as sqla_log
from ..sql import util as sql_util
from ..orm import exc as orm_exc
from .. import exc as sa_exc
from .. import util
import copy
import logging
log = logging.getLogger(__name__)
class BakedQuery(object):
"""A builder object for :class:`.query.Query` objects."""
__slots__ = 'steps', '_bakery', '_cache_key', '_spoiled'
def __init__(self, bakery, initial_fn, args=()):
self._cache_key = ()
self._update_cache_key(initial_fn, args)
self.steps = [initial_fn]
self._spoiled = False
self._bakery = bakery
@classmethod
def bakery(cls, size=200):
"""Construct a new bakery."""
_bakery = util.LRUCache(size)
def call(initial_fn, *args):
return cls(_bakery, initial_fn, args)
return call
def _clone(self):
b1 = BakedQuery.__new__(BakedQuery)
b1._cache_key = self._cache_key
b1.steps = list(self.steps)
b1._bakery = self._bakery
b1._spoiled = self._spoiled
return b1
def _update_cache_key(self, fn, args=()):
self._cache_key += (fn.__code__,) + args
def __iadd__(self, other):
if isinstance(other, tuple):
self.add_criteria(*other)
else:
self.add_criteria(other)
return self
def __add__(self, other):
if isinstance(other, tuple):
return self.with_criteria(*other)
else:
return self.with_criteria(other)
def add_criteria(self, fn, *args):
"""Add a criteria function to this :class:`.BakedQuery`.
This is equivalent to using the ``+=`` operator to
modify a :class:`.BakedQuery` in-place.
"""
self._update_cache_key(fn, args)
self.steps.append(fn)
return self
def with_criteria(self, fn, *args):
"""Add a criteria function to a :class:`.BakedQuery` cloned from this one.
This is equivalent to using the ``+`` operator to
produce a new :class:`.BakedQuery` with modifications.
"""
return self._clone().add_criteria(fn, *args)
def for_session(self, session):
"""Return a :class:`.Result` object for this :class:`.BakedQuery`.
This is equivalent to calling the :class:`.BakedQuery` as a
Python callable, e.g. ``result = my_baked_query(session)``.
"""
return Result(self, session)
def __call__(self, session):
return self.for_session(session)
def spoil(self, full=False):
"""Cancel any query caching that will occur on this BakedQuery object.
The BakedQuery can continue to be used normally, however additional
creational functions will not be cached; they will be called
on every invocation.
This is to support the case where a particular step in constructing
a baked query disqualifies the query from being cacheable, such
as a variant that relies upon some uncacheable value.
:param full: if False, only functions added to this
:class:`.BakedQuery` object subsequent to the spoil step will be
non-cached; the state of the :class:`.BakedQuery` up until
this point will be pulled from the cache. If True, then the
entire :class:`.Query` object is built from scratch each
time, with all creational functions being called on each
invocation.
"""
if not full:
_spoil_point = self._clone()
_spoil_point._cache_key += ('_query_only', )
self.steps = [_spoil_point._retrieve_baked_query]
self._spoiled = True
return self
def _retrieve_baked_query(self, session):
query = self._bakery.get(self._cache_key, None)
if query is None:
query = self._as_query(session)
self._bakery[self._cache_key] = query.with_session(None)
return query.with_session(session)
def _bake(self, session):
query = self._as_query(session)
context = query._compile_context()
self._bake_subquery_loaders(session, context)
context.session = None
context.query = query = context.query.with_session(None)
query._execution_options = query._execution_options.union(
{"compiled_cache": self._bakery}
)
# we'll be holding onto the query for some of its state,
# so delete some compilation-use-only attributes that can take up
# space
for attr in (
'_correlate', '_from_obj', '_mapper_adapter_map',
'_joinpath', '_joinpoint'):
query.__dict__.pop(attr, None)
self._bakery[self._cache_key] = context
return context
def _as_query(self, session):
query = self.steps[0](session)
for step in self.steps[1:]:
query = step(query)
return query
def _bake_subquery_loaders(self, session, context):
"""convert subquery eager loaders in the cache into baked queries.
For subquery eager loading to work, all we need here is that the
Query point to the correct session when it is run. However, since
we are "baking" anyway, we may as well also turn the query into
a "baked" query so that we save on performance too.
"""
context.attributes['baked_queries'] = baked_queries = []
for k, v in list(context.attributes.items()):
if isinstance(v, Query):
if 'subquery' in k:
bk = BakedQuery(self._bakery, lambda *args: v)
bk._cache_key = self._cache_key + k
bk._bake(session)
baked_queries.append((k, bk._cache_key, v))
del context.attributes[k]
def _unbake_subquery_loaders(self, session, context, params):
"""Retrieve subquery eager loaders stored by _bake_subquery_loaders
and turn them back into Result objects that will iterate just
like a Query object.
"""
for k, cache_key, query in context.attributes["baked_queries"]:
            bk = BakedQuery(
                self._bakery,
                # bind query per iteration; a bare closure would late-bind
                # the loop variable
                lambda sess, query=query: query.with_session(sess))
bk._cache_key = cache_key
context.attributes[k] = bk.for_session(session).params(**params)
class Result(object):
"""Invokes a :class:`.BakedQuery` against a :class:`.Session`.
The :class:`.Result` object is where the actual :class:`.query.Query`
object gets created, or retrieved from the cache,
against a target :class:`.Session`, and is then invoked for results.
"""
__slots__ = 'bq', 'session', '_params'
def __init__(self, bq, session):
self.bq = bq
self.session = session
self._params = {}
def params(self, *args, **kw):
"""Specify parameters to be replaced into the string SQL statement."""
if len(args) == 1:
kw.update(args[0])
elif len(args) > 0:
raise sa_exc.ArgumentError(
"params() takes zero or one positional argument, "
"which is a dictionary.")
self._params.update(kw)
return self
def _as_query(self):
return self.bq._as_query(self.session).params(self._params)
def __str__(self):
return str(self._as_query())
def __iter__(self):
bq = self.bq
if bq._spoiled:
return iter(self._as_query())
baked_context = bq._bakery.get(bq._cache_key, None)
if baked_context is None:
baked_context = bq._bake(self.session)
context = copy.copy(baked_context)
context.session = self.session
context.attributes = context.attributes.copy()
bq._unbake_subquery_loaders(self.session, context, self._params)
context.statement.use_labels = True
if context.autoflush and not context.populate_existing:
self.session._autoflush()
return context.query.params(self._params).\
with_session(self.session)._execute_and_instances(context)
def first(self):
"""Return the first row.
Equivalent to :meth:`.Query.first`.
"""
bq = self.bq.with_criteria(lambda q: q.slice(0, 1))
ret = list(bq.for_session(self.session).params(self._params))
if len(ret) > 0:
return ret[0]
else:
return None
def one(self):
"""Return exactly one result or raise an exception.
Equivalent to :meth:`.Query.one`.
"""
ret = list(self)
l = len(ret)
if l == 1:
return ret[0]
elif l == 0:
raise orm_exc.NoResultFound("No row was found for one()")
else:
raise orm_exc.MultipleResultsFound(
"Multiple rows were found for one()")
def all(self):
"""Return all rows.
Equivalent to :meth:`.Query.all`.
"""
return list(self)
def get(self, ident):
"""Retrieve an object based on identity.
Equivalent to :meth:`.Query.get`.
"""
query = self.bq.steps[0](self.session)
return query._get_impl(ident, self._load_on_ident)
def _load_on_ident(self, query, key):
"""Load the given identity key from the database."""
ident = key[1]
mapper = query._mapper_zero()
_get_clause, _get_params = mapper._get_clause
def setup(query):
_lcl_get_clause = _get_clause
q = query._clone()
q._get_condition()
q._order_by = None
# None present in ident - turn those comparisons
# into "IS NULL"
if None in ident:
nones = set([
_get_params[col].key for col, value in
zip(mapper.primary_key, ident) if value is None
])
_lcl_get_clause = sql_util.adapt_criterion_to_null(
_lcl_get_clause, nones)
_lcl_get_clause = q._adapt_clause(_lcl_get_clause, True, False)
q._criterion = _lcl_get_clause
return q
# cache the query against a key that includes
# which positions in the primary key are NULL
# (remember, we can map to an OUTER JOIN)
bq = self.bq
bq = bq.with_criteria(setup, tuple(elem is None for elem in ident))
params = dict([
(_get_params[primary_key].key, id_val)
for id_val, primary_key in zip(ident, mapper.primary_key)
])
result = list(bq.for_session(self.session).params(**params))
l = len(result)
if l > 1:
raise orm_exc.MultipleResultsFound()
elif l:
return result[0]
else:
return None
def bake_lazy_loaders():
"""Enable the use of baked queries for all lazyloaders systemwide.
This operation should be safe for all lazy loaders, and will reduce
Python overhead for these operations.
"""
strategies.LazyLoader._strategy_keys[:] = []
BakedLazyLoader._strategy_keys[:] = []
properties.RelationshipProperty.strategy_for(
lazy="select")(BakedLazyLoader)
properties.RelationshipProperty.strategy_for(
lazy=True)(BakedLazyLoader)
properties.RelationshipProperty.strategy_for(
lazy="baked_select")(BakedLazyLoader)
def unbake_lazy_loaders():
"""Disable the use of baked queries for all lazyloaders systemwide.
This operation reverts the changes produced by :func:`.bake_lazy_loaders`.
"""
strategies.LazyLoader._strategy_keys[:] = []
BakedLazyLoader._strategy_keys[:] = []
properties.RelationshipProperty.strategy_for(
lazy="select")(strategies.LazyLoader)
properties.RelationshipProperty.strategy_for(
lazy=True)(strategies.LazyLoader)
properties.RelationshipProperty.strategy_for(
lazy="baked_select")(BakedLazyLoader)
assert strategies.LazyLoader._strategy_keys
@sqla_log.class_logger
@properties.RelationshipProperty.strategy_for(lazy="baked_select")
class BakedLazyLoader(strategies.LazyLoader):
def _emit_lazyload(self, session, state, ident_key, passive):
q = BakedQuery(
self.mapper._compiled_cache,
lambda session: session.query(self.mapper))
q.add_criteria(
lambda q: q._adapt_all_clauses()._with_invoke_all_eagers(False),
self.parent_property)
if not self.parent_property.bake_queries:
q.spoil(full=True)
if self.parent_property.secondary is not None:
q.add_criteria(
lambda q:
q.select_from(self.mapper, self.parent_property.secondary))
pending = not state.key
# don't autoflush on pending
if pending or passive & attributes.NO_AUTOFLUSH:
q.add_criteria(lambda q: q.autoflush(False))
if state.load_path:
q.spoil()
q.add_criteria(
lambda q:
q._with_current_path(state.load_path[self.parent_property]))
if state.load_options:
q.spoil()
q.add_criteria(
lambda q: q._conditional_options(*state.load_options))
if self.use_get:
return q(session)._load_on_ident(
session.query(self.mapper), ident_key)
if self.parent_property.order_by:
q.add_criteria(
lambda q:
q.order_by(*util.to_list(self.parent_property.order_by)))
for rev in self.parent_property._reverse_property:
# reverse props that are MANYTOONE are loading *this*
# object from get(), so don't need to eager out to those.
if rev.direction is interfaces.MANYTOONE and \
rev._use_get and \
not isinstance(rev.strategy, strategies.LazyLoader):
q.add_criteria(
lambda q:
q.options(
strategy_options.Load(
rev.parent).baked_lazyload(rev.key)))
lazy_clause, params = self._generate_lazy_clause(state, passive)
if pending:
if orm_util._none_set.intersection(params.values()):
return None
q.add_criteria(lambda q: q.filter(lazy_clause))
result = q(session).params(**params).all()
if self.uselist:
return result
else:
l = len(result)
if l:
if l > 1:
util.warn(
"Multiple rows returned with "
"uselist=False for lazily-loaded attribute '%s' "
% self.parent_property)
return result[0]
else:
return None
@strategy_options.loader_option()
def baked_lazyload(loadopt, attr):
"""Indicate that the given attribute should be loaded using "lazy"
loading with a "baked" query used in the load.
"""
return loadopt.set_relationship_strategy(attr, {"lazy": "baked_select"})
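# Illustrative use of the option (``Parent`` and its ``children``
# relationship are assumed names, not part of this module):
#
#   session.query(Parent).options(baked_lazyload('children'))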
@baked_lazyload._add_unbound_fn
def baked_lazyload(*keys):
return strategy_options._UnboundLoad._from_keys(
strategy_options._UnboundLoad.baked_lazyload, keys, False, {})
@baked_lazyload._add_unbound_all_fn
def baked_lazyload_all(*keys):
return strategy_options._UnboundLoad._from_keys(
strategy_options._UnboundLoad.baked_lazyload, keys, True, {})
baked_lazyload = baked_lazyload._unbound_fn
baked_lazyload_all = baked_lazyload_all._unbound_all_fn
bakery = BakedQuery.bakery
|
piergiaj/caffe-to-theano | refs/heads/master | downsample.py | 1 | """ Ops for downsampling images.
Planned:
DownsampleFactorMax, DownsampleAvg, DownsampleSoftmax.
"""
# This file should move along with conv.py
import __builtin__
import numpy
import theano
from theano import gof, Op, tensor, Variable, Apply
def max_pool2D(*args, **kwargs):
import sys
print >> sys.stderr, "DEPRECATION: max_pool2D renamed to max_pool_2d"
return max_pool_2d(*args, **kwargs)
def max_pool_2d(input, ds, ignore_border=False, st=None, padding=(0, 0)):
"""
Takes as input a N-D tensor, where N >= 2. It downscales the input image by
the specified factor, by keeping only the maximum value of non-overlapping
patches of size (ds[0],ds[1])
:type input: N-D theano tensor of input images.
:param input: input images. Max pooling will be done over the 2 last
dimensions.
:type ds: tuple of length 2
:param ds: factor by which to downscale (vertical ds, horizontal ds).
(2,2) will halve the image in each dimension.
:type ignore_border: bool
:param ignore_border: When True, (5,5) input with ds=(2,2)
will generate a (2,2) output. (3,3) otherwise.
:type st: tuple of lenght 2
:param st: stride size, which is the number of shifts
over rows/cols to get the the next pool region.
if st is None, it is considered equal to ds
(no overlap on pooling regions)
:param padding: (pad_h, pad_w), pad zeros to extend beyond four borders
of the images, pad_h is the size of the top and bottom margins,
and pad_w is the size of the left and right margins.
:type padding: tuple of two ints
"""
if input.ndim < 2:
raise NotImplementedError('max_pool_2d requires a dimension >= 2')
if input.ndim == 4:
op = DownsampleFactorMax(ds, ignore_border, st=st, padding=padding)
output = op(input)
return output
# extract image dimensions
img_shape = input.shape[-2:]
# count the number of "leading" dimensions, store as dmatrix
batch_size = tensor.prod(input.shape[:-2])
batch_size = tensor.shape_padright(batch_size, 1)
# store as 4D tensor with shape: (batch_size,1,height,width)
new_shape = tensor.cast(tensor.join(0, batch_size,
tensor.as_tensor([1]),
img_shape), 'int64')
input_4D = tensor.reshape(input, new_shape, ndim=4)
# downsample mini-batch of images
op = DownsampleFactorMax(ds, ignore_border, st=st, padding=padding)
output = op(input_4D)
# restore to original shape
outshp = tensor.join(0, input.shape[:-2], output.shape[-2:])
return tensor.reshape(output, outshp, ndim=input.ndim)
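# Minimal usage sketch (illustrative shapes): pool a batch of images with
# non-overlapping 2x2 regions; a Theano function is compiled to evaluate it.
#
#     x = tensor.tensor4('x')
#     pooled = max_pool_2d(x, ds=(2, 2), ignore_border=True)
#     f = theano.function([x], pooled)
#     out = f(numpy.random.rand(8, 3, 224, 224).astype('float32'))
#     # out.shape == (8, 3, 112, 112)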
class DownsampleFactorMax(Op):
"""For N-dimensional tensors, consider that the last two
dimensions span images. This Op downsamples these images by a
    factor ds, by taking the max over non-overlapping rectangular
regions.
"""
__props__ = ('ds', 'ignore_border', 'st', 'padding')
@staticmethod
def out_shape(imgshape, ds, ignore_border=False, st=None, padding=(0, 0)):
"""Return the shape of the output from this op, for input of given
shape and flags.
:param imgshape: the shape of a tensor of images. The last two elements
are interpreted as the number of rows, and the number of cols.
:type imgshape: tuple, list, or similar of integer or
scalar Theano variable.
:param ds: downsample factor over rows and columns
this parameter indicates the size of the pooling region
:type ds: list or tuple of two ints
:param st: the stride size. This is the distance between the pooling
        regions. If it is set to None, it equals ds.
:type st: list or tuple of two ints
:param ignore_border: if ds doesn't divide imgshape, do we include an
extra row/col of partial downsampling (False) or ignore it (True).
:type ignore_border: bool
:param padding: (pad_h, pad_w), pad zeros to extend beyond four borders
of the images, pad_h is the size of the top and bottom margins,
and pad_w is the size of the left and right margins.
:type padding: tuple of two ints
:rtype: list
:returns: the shape of the output from this op, for input of given
shape. This will have the same length as imgshape, but with last
two elements reduced as per the downsampling & ignore_border flags.
"""
if len(imgshape) < 2:
raise TypeError('imgshape must have at least two elements '
'(rows, cols)')
if st is None:
st = ds
r, c = imgshape[-2:]
r += padding[0] * 2
c += padding[1] * 2
if ignore_border:
out_r = (r - ds[0]) // st[0] + 1
out_c = (c - ds[1]) // st[1] + 1
if isinstance(r, theano.Variable):
nr = tensor.maximum(out_r, 0)
else:
nr = numpy.maximum(out_r, 0)
if isinstance(c, theano.Variable):
nc = tensor.maximum(out_c, 0)
else:
nc = numpy.maximum(out_c, 0)
else:
if isinstance(r, theano.Variable):
nr = tensor.switch(tensor.ge(st[0], ds[0]),
(r - 1) // st[0] + 1,
tensor.maximum(0, (r - 1 - ds[0])
// st[0] + 1) + 1)
elif st[0] >= ds[0]:
nr = (r - 1) // st[0] + 1
else:
nr = max(0, (r - 1 - ds[0]) // st[0] + 1) + 1
if isinstance(c, theano.Variable):
nc = tensor.switch(tensor.ge(st[1], ds[1]),
(c - 1) // st[1] + 1,
tensor.maximum(0, (c - 1 - ds[1])
// st[1] + 1) + 1)
elif st[1] >= ds[1]:
nc = (c - 1) // st[1] + 1
else:
nc = max(0, (c - 1 - ds[1]) // st[1] + 1) + 1
rval = list(imgshape[:-2]) + [nr, nc]
return rval
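    # Worked example (illustrative numbers): for imgshape=(16, 1, 5, 5),
    # ds=(2, 2), st=None (so st defaults to ds) and padding=(0, 0):
    #   ignore_border=True  -> nr = nc = (5 - 2)//2 + 1 = 2 -> [16, 1, 2, 2]
    #   ignore_border=False -> nr = nc = (5 - 1)//2 + 1 = 3 -> [16, 1, 3, 3]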
def __init__(self, ds, ignore_border=False, st=None, padding=(0, 0)):
"""
        :param ds: downsample factor over rows and columns.
ds indicates the pool region size.
:type ds: list or tuple of two ints
:param ignore_border: if ds doesn't divide imgshape, do we include
an extra row/col of partial downsampling (False) or
ignore it (True).
:type ignore_border: bool
        :param st: stride size, which is the number of shifts
            over rows/cols to get the next pool region.
            if st is None, it is considered equal to ds
            (no overlap on pooling regions)
        :type st: list or tuple of two ints
:param padding: (pad_h, pad_w), pad zeros to extend beyond four borders
of the images, pad_h is the size of the top and bottom margins,
and pad_w is the size of the left and right margins.
:type padding: tuple of two ints
"""
self.ds = tuple(ds)
if not all([isinstance(d, int) for d in ds]):
raise ValueError(
"DownsampleFactorMax downsample parameters must be ints."
" Got %s" % str(ds))
if st is None:
st = ds
self.st = tuple(st)
self.ignore_border = ignore_border
self.padding = tuple(padding)
if self.padding != (0, 0) and not ignore_border:
raise NotImplementedError(
'padding works only with ignore_border=True')
if self.padding[0] >= self.ds[0] or self.padding[1] >= self.ds[1]:
raise NotImplementedError(
                'padding_h and padding_w must be smaller than the pool sizes (ds)')
def __str__(self):
return '%s{%s, %s, %s, %s}' % (
self.__class__.__name__,
self.ds, self.st, self.ignore_border, self.padding)
def make_node(self, x):
if x.type.ndim != 4:
raise TypeError()
# TODO: consider restricting the dtype?
x = tensor.as_tensor_variable(x)
return gof.Apply(self, [x], [x.type()])
def perform(self, node, inp, out):
x, = inp
z, = out
if len(x.shape) != 4:
raise NotImplementedError(
'DownsampleFactorMax requires 4D input for now')
z_shape = self.out_shape(x.shape, self.ds, self.ignore_border, self.st,
self.padding)
if (z[0] is None) or (z[0].shape != z_shape):
z[0] = numpy.empty(z_shape, dtype=x.dtype)
zz = z[0]
# number of pooling output rows
pr = zz.shape[-2]
# number of pooling output cols
pc = zz.shape[-1]
ds0, ds1 = self.ds
st0, st1 = self.st
pad_h = self.padding[0]
pad_w = self.padding[1]
img_rows = x.shape[-2] + 2 * pad_h
img_cols = x.shape[-1] + 2 * pad_w
# pad the image
if self.padding != (0, 0):
fill = x.min()-1.
y = numpy.zeros(
(x.shape[0], x.shape[1], img_rows, img_cols),
dtype=x.dtype) + fill
y[:, :, pad_h:(img_rows-pad_h), pad_w:(img_cols-pad_w)] = x
else:
y = x
# max pooling
for n in xrange(x.shape[0]):
for k in xrange(x.shape[1]):
for r in xrange(pr):
row_st = r * st0
row_end = __builtin__.min(row_st + ds0, img_rows)
for c in xrange(pc):
col_st = c * st1
col_end = __builtin__.min(col_st + ds1, img_cols)
zz[n, k, r, c] = y[
n, k, row_st:row_end, col_st:col_end].max()
def infer_shape(self, node, in_shapes):
shp = self.out_shape(in_shapes[0], self.ds,
self.ignore_border, self.st, self.padding)
return [shp]
def grad(self, inp, grads):
x, = inp
gz, = grads
maxout = self(x)
return [DownsampleFactorMaxGrad(self.ds,
ignore_border=self.ignore_border,
st=self.st, padding=self.padding)(
x, maxout, gz)]
def c_code(self, node, name, inp, out, sub):
        # No implementation currently exists for the case where
# the stride size and the pooling size are different.
# An exception is raised for such a case.
if self.ds != self.st or self.padding != (0, 0):
raise theano.gof.utils.MethodNotDefined()
x, = inp
z, = out
fail = sub['fail']
ignore_border = int(self.ignore_border)
ds0, ds1 = self.ds
return """
int typenum = PyArray_ObjectType((PyObject*)%(x)s, 0);
int x_shp0_usable;
int x_shp1_usable;
int z_shp0, z_shp1;
if(PyArray_NDIM(%(x)s)!=4)
{
PyErr_SetString(PyExc_ValueError, "x must be a 4d ndarray");
%(fail)s;
}
z_shp0 = PyArray_DIMS(%(x)s)[2] / %(ds0)s;
z_shp1 = PyArray_DIMS(%(x)s)[3] / %(ds1)s;
if (%(ignore_border)s)
{
x_shp0_usable = z_shp0 * %(ds0)s;
x_shp1_usable = z_shp1 * %(ds1)s;
}
else
{
z_shp0 += (PyArray_DIMS(%(x)s)[2] %% %(ds0)s) ? 1 : 0;
z_shp1 += (PyArray_DIMS(%(x)s)[3] %% %(ds1)s) ? 1 : 0;
x_shp0_usable = PyArray_DIMS(%(x)s)[2];
x_shp1_usable = PyArray_DIMS(%(x)s)[3];
}
if ((!%(z)s)
|| *PyArray_DIMS(%(z)s)!=4
||(PyArray_DIMS(%(z)s)[0] != PyArray_DIMS(%(x)s)[0])
||(PyArray_DIMS(%(z)s)[1] != PyArray_DIMS(%(x)s)[1])
||(PyArray_DIMS(%(z)s)[2] != z_shp0)
||(PyArray_DIMS(%(z)s)[3] != z_shp1)
)
{
if (%(z)s) Py_XDECREF(%(z)s);
npy_intp dims[4] = {0,0,0,0};
dims[0]=PyArray_DIMS(%(x)s)[0];
dims[1]=PyArray_DIMS(%(x)s)[1];
dims[2]=z_shp0;
dims[3]=z_shp1;
//TODO: zeros not necessary
%(z)s = (PyArrayObject*) PyArray_ZEROS(4, dims, typenum,0);
}
if (z_shp0 && z_shp1)
{
for(int b=0;b<PyArray_DIMS(%(x)s)[0];b++){
for(int k=0;k<PyArray_DIMS(%(x)s)[1];k++){
int mini_i = 0;
int zi = 0;
for(int i=0;i< x_shp0_usable; i++){
int mini_j = 0;
int zj = 0;
for(int j=0; j<x_shp1_usable; j++){
dtype_%(x)s a = ((dtype_%(x)s*)(PyArray_GETPTR4(%(x)s,b,k,i,j)))[0];
dtype_%(z)s * __restrict__ z = ((dtype_%(z)s*)(PyArray_GETPTR4(%(z)s,b,k,zi,zj)));
z[0] = (((mini_j|mini_i) == 0) || z[0] < a) ? a : z[0];
mini_j = ((mini_j + 1) == %(ds1)s) ? 0 : mini_j+1;
zj += (mini_j == 0);
}
mini_i = ((mini_i + 1) == %(ds0)s) ? 0 : mini_i+1;
zi += (mini_i == 0);
}
}
}
}
""" % locals()
def c_code_cache_version(self):
return (0, 2)
class DownsampleFactorMaxGrad(Op):
__props__ = ('ds', 'ignore_border', 'st', 'padding')
def __init__(self, ds, ignore_border, st=None, padding=(0, 0)):
self.ds = tuple(ds)
self.ignore_border = ignore_border
if st is None:
st = ds
self.st = tuple(st)
self.padding = tuple(padding)
def __str__(self):
return '%s{%s, %s, %s, %s}' % (
self.__class__.__name__,
self.ds, self.st, self.ignore_border, self.padding)
def make_node(self, x, maxout, gz):
# make_node should only be called by the grad function of
# DownsampleFactorMax, so these asserts should not fail.
assert isinstance(x, Variable) and x.ndim == 4
assert isinstance(maxout, Variable) and maxout.ndim == 4
assert isinstance(gz, Variable) and gz.ndim == 4
x = tensor.as_tensor_variable(x)
maxout = tensor.as_tensor_variable(maxout)
gz = tensor.as_tensor_variable(gz)
return Apply(self, [x, maxout, gz], [x.type()])
def perform(self, node, inp, out):
x, maxout, gz = inp
gx_stg, = out
# number of pooling output rows
pr = maxout.shape[-2]
# number of pooling output cols
pc = maxout.shape[-1]
ds0, ds1 = self.ds
st0, st1 = self.st
pad_h = self.padding[0]
pad_w = self.padding[1]
img_rows = x.shape[-2] + 2 * pad_h
img_cols = x.shape[-1] + 2 * pad_w
# pad the image
if self.padding != (0, 0):
fill = x.min()-1
y = numpy.zeros(
(x.shape[0], x.shape[1], img_rows, img_cols),
dtype=x.dtype) + fill
y[:, :, pad_h:(img_rows-pad_h), pad_w:(img_cols-pad_w)] = x
else:
y = x
gx = numpy.zeros_like(y)
for n in xrange(x.shape[0]):
for k in xrange(x.shape[1]):
for r in xrange(pr):
row_st = r * st0
row_end = __builtin__.min(row_st + ds0, img_rows)
for c in xrange(pc):
col_st = c * st1
col_end = __builtin__.min(col_st + ds1, img_cols)
for row_ind in xrange(row_st, row_end):
for col_ind in xrange(col_st, col_end):
if (maxout[n, k, r, c] == y[n, k, row_ind, col_ind]):
gx[n, k, row_ind, col_ind] += gz[n, k, r, c]
# unpad the image
gx = gx[:, :, pad_h:(img_rows-pad_h), pad_w:(img_cols-pad_w)]
gx_stg[0] = gx
def infer_shape(self, node, in_shapes):
return [in_shapes[0]]
def grad(self, inp, grads):
x, maxout, gz = inp
ggx, = grads
if self.padding == (0, 0):
return [theano.tensor.zeros_like(x),
theano.tensor.zeros_like(maxout),
DownsampleFactorMaxGradGrad(
self.ds, ignore_border=self.ignore_border,
st=self.st)(x, maxout, ggx)]
else:
return [theano.tensor.zeros_like(x),
theano.tensor.zeros_like(maxout),
theano.gradients.grad_not_implemented(
self, 2, gz, 'Hessian not implemented with padding')]
def c_code(self, node, name, inp, out, sub):
if self.ds != self.st or self.padding != (0, 0):
raise theano.gof.utils.MethodNotDefined()
x, z, gz = inp
gx, = out
fail = sub['fail']
ignore_border = int(self.ignore_border)
ds0, ds1 = self.ds
return """
int x_typenum = PyArray_ObjectType((PyObject*)%(x)s, 0);
int z_typenum = PyArray_ObjectType((PyObject*)%(z)s, 0);
int gz_typenum = PyArray_ObjectType((PyObject*)%(gz)s, 0);
int x_shp0_usable;
int x_shp1_usable;
int z_shp0, z_shp1;
if ((x_typenum != z_typenum) || (x_typenum != gz_typenum))
{
PyErr_SetString(PyExc_ValueError, "input types must all match");
%(fail)s;
}
if(PyArray_NDIM(%(x)s)!=4)
{
PyErr_SetString(PyExc_ValueError, "x must be a 4d ndarray");
%(fail)s;
}
if(PyArray_NDIM(%(z)s)!=4)
{
PyErr_SetString(PyExc_ValueError, "z must be a 4d ndarray");
%(fail)s;
}
if(PyArray_NDIM(%(gz)s)!=4)
{
PyErr_SetString(PyExc_ValueError, "gz must be a 4d ndarray");
%(fail)s;
}
z_shp0 = PyArray_DIMS(%(z)s)[2];
z_shp1 = PyArray_DIMS(%(z)s)[3];
if (%(ignore_border)s)
{
x_shp0_usable = z_shp0 * %(ds0)s;
x_shp1_usable = z_shp1 * %(ds1)s;
}
else
{
x_shp0_usable = PyArray_DIMS(%(x)s)[2];
x_shp1_usable = PyArray_DIMS(%(x)s)[3];
}
if ((!%(gx)s)
|| *PyArray_DIMS(%(gx)s)!=4
||(PyArray_DIMS(%(gx)s)[0] != PyArray_DIMS(%(x)s)[0])
||(PyArray_DIMS(%(gx)s)[1] != PyArray_DIMS(%(x)s)[1])
||(PyArray_DIMS(%(gx)s)[2] != PyArray_DIMS(%(x)s)[2])
||(PyArray_DIMS(%(gx)s)[3] != PyArray_DIMS(%(x)s)[3])
)
{
Py_XDECREF(%(gx)s);
%(gx)s = (PyArrayObject*) PyArray_ZEROS(4, PyArray_DIMS(%(x)s), x_typenum,0);
}
for(int b=0;b<PyArray_DIMS(%(x)s)[0];b++){
for(int k=0;k<PyArray_DIMS(%(x)s)[1];k++){
int mini_i = 0;
int zi = 0;
for(int i=0;i< x_shp0_usable; i++){
int mini_j = 0;
int zj = 0;
for(int j=0; j< x_shp1_usable; j++){
dtype_%(x)s * __restrict__ xp = ((dtype_%(x)s*)(PyArray_GETPTR4(%(x)s,b,k,i,j)));
dtype_%(gx)s * __restrict__ gxp = ((dtype_%(gx)s*)(PyArray_GETPTR4(%(gx)s,b,k,i,j)));
dtype_%(z)s * __restrict__ zp = ((dtype_%(z)s*)(PyArray_GETPTR4(%(z)s,b,k,zi,zj)));
dtype_%(gz)s * __restrict__ gzp = ((dtype_%(gz)s*)(PyArray_GETPTR4(%(gz)s,b,k,zi,zj)));
gxp[0] = (zp[0] == xp[0]) ? gzp[0] : 0;
mini_j = (mini_j + 1 == %(ds1)s) ? 0 : mini_j+1;
zj += (mini_j == 0);
}//for j
mini_i = (mini_i + 1 == %(ds0)s) ? 0 : mini_i+1;
zi += (mini_i == 0);
for (int j = x_shp1_usable; j < PyArray_DIMS(%(x)s)[3]; ++j) {
dtype_%(gx)s * gxp = ((dtype_%(gx)s*)(PyArray_GETPTR4(%(gx)s,b,k,i,j)));
gxp[0] = 0;
}
}//for i
for(int i = x_shp0_usable; i < PyArray_DIMS(%(x)s)[2]; i++){
for (int j = 0; j < PyArray_DIMS(%(x)s)[3]; ++j) {
dtype_%(gx)s * gxp = ((dtype_%(gx)s*)(PyArray_GETPTR4(%(gx)s,b,k,i,j)));
gxp[0] = 0;
}
}
}//for k
}//for b
""" % locals()
def c_code_cache_version(self):
return (0, 2)
class DownsampleFactorMaxGradGrad(Op):
@staticmethod
def out_shape(imgshape, ds, ignore_border=False, st=None):
"""Return the shape of the output from this op, for input of given
shape and flags.
:param imgshape: the shape of a tensor of images. The last two elements
are interpreted as the number of rows, and the number of cols.
:type imgshape: tuple, list, or similar of integer or
scalar Theano variable.
:param ds: downsample factor over rows and columns
this parameter indicates the size of the pooling region
:type ds: list or tuple of two ints
:param st: the stride size. This is the distance between the pooling
        regions. If it is set to None, it equals ds.
:type st: list or tuple of two ints
:param ignore_border: if ds doesn't divide imgshape, do we include an
extra row/col of partial downsampling (False) or ignore it (True).
:type ignore_border: bool
:rtype: list
:returns: the shape of the output from this op, for input of given
shape. This will have the same length as imgshape, but with last
two elements reduced as per the downsampling & ignore_border flags.
"""
if len(imgshape) < 2:
raise TypeError('imgshape must have at least two elements '
'(rows, cols)')
if st is None:
st = ds
r, c = imgshape[-2:]
if ignore_border:
out_r = (r - ds[0]) // st[0] + 1
out_c = (c - ds[1]) // st[1] + 1
if isinstance(r, theano.Variable):
nr = tensor.maximum(out_r, 0)
else:
nr = numpy.maximum(out_r, 0)
if isinstance(c, theano.Variable):
nc = tensor.maximum(out_c, 0)
else:
nc = numpy.maximum(out_c, 0)
else:
if isinstance(r, theano.Variable):
nr = tensor.switch(tensor.ge(st[0], ds[0]),
(r - 1) // st[0] + 1,
tensor.maximum(0, (r - 1 - ds[0])
// st[0] + 1) + 1)
elif st[0] >= ds[0]:
nr = (r - 1) // st[0] + 1
else:
nr = max(0, (r - 1 - ds[0]) // st[0] + 1) + 1
if isinstance(c, theano.Variable):
nc = tensor.switch(tensor.ge(st[1], ds[1]),
(c - 1) // st[1] + 1,
tensor.maximum(0, (c - 1 - ds[1])
// st[1] + 1) + 1)
elif st[1] >= ds[1]:
nc = (c - 1) // st[1] + 1
else:
nc = max(0, (c - 1 - ds[1]) // st[1] + 1) + 1
rval = list(imgshape[:-2]) + [nr, nc]
return rval
def __init__(self, ds, ignore_border, st=None):
self.ds = tuple(ds)
self.ignore_border = ignore_border
if st is None:
st = ds
self.st = tuple(st)
def __eq__(self, other):
return (type(self) == type(other)
and self.ds == other.ds
and self.st == other.st
and self.ignore_border == other.ignore_border)
def __hash__(self):
return hash(type(self)) ^ hash(self.ds) ^ \
hash(self.st) ^ hash(self.ignore_border)
def __str__(self):
return '%s{%s,%s,%s}' % (self.__class__.__name__,
self.ds, self.st, self.ignore_border)
def make_node(self, x, maxout, gz):
# make_node should only be called by the grad function of
# DownsampleFactorMaxGrad, so these asserts should not fail.
assert isinstance(x, Variable) and x.ndim == 4
assert isinstance(maxout, Variable) and maxout.ndim == 4
assert isinstance(gz, Variable) and gz.ndim == 4
x = tensor.as_tensor_variable(x)
maxout = tensor.as_tensor_variable(maxout)
gz = tensor.as_tensor_variable(gz)
return Apply(self, [x, maxout, gz], [x.type()])
def perform(self, node, inp, out):
x, maxout, ggx = inp
z, = out
if len(x.shape) != 4:
raise NotImplementedError(
'DownsampleFactorMaxGradGrad requires 4D input for now')
z_shape = self.out_shape(x.shape, self.ds, self.ignore_border, self.st)
if (z[0] is None) or (z[0].shape != z_shape):
z[0] = numpy.zeros(self.out_shape(x.shape, self.ds,
self.ignore_border, self.st),
dtype=x.dtype)
ggz = z[0]
# number of pooling output rows
pr = ggz.shape[-2]
# number of pooling output cols
pc = ggz.shape[-1]
ds0, ds1 = self.ds
st0, st1 = self.st
img_rows = x.shape[-2]
img_cols = x.shape[-1]
for n in xrange(x.shape[0]):
for k in xrange(x.shape[1]):
for r in xrange(pr):
row_st = r * st0
row_end = __builtin__.min(row_st + ds0, img_rows)
for c in xrange(pc):
col_st = c * st1
col_end = __builtin__.min(col_st + ds1, img_cols)
for row_ind in xrange(row_st, row_end):
for col_ind in xrange(col_st, col_end):
if (maxout[n, k, r, c] == x[n, k, row_ind, col_ind]):
ggz[n, k, r, c] = ggx[n, k, row_ind, col_ind]
def infer_shape(self, node, in_shapes):
return [in_shapes[0]]
|
paulsheridan/django-portfolio | refs/heads/master | portfolio/projects/migrations/0007_auto_20160620_2131.py | 1 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-06-20 21:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projects', '0006_auto_20160618_0323'),
]
operations = [
migrations.AlterField(
model_name='project',
name='description',
field=models.TextField(max_length=1000),
),
migrations.AlterField(
model_name='project',
name='url',
field=models.URLField(default='', max_length=50),
),
]
|
liikGit/MissionPlanner | refs/heads/master | Lib/site-packages/scipy/optimize/nnls.py | 51 | import _nnls
from numpy import asarray_chkfinite, zeros, double
def nnls(A,b):
"""
Solve ``argmin_x || Ax - b ||_2`` for ``x>=0``.
Parameters
----------
A : ndarray
Matrix ``A`` as shown above.
b : ndarray
Right-hand side vector.
Returns
-------
x : ndarray
Solution vector.
rnorm : float
The residual, ``|| Ax-b ||_2``.
Notes
-----
This is a wrapper for ``NNLS.F``.
"""
A,b = map(asarray_chkfinite, (A,b))
if len(A.shape)!=2:
raise ValueError("expected matrix")
if len(b.shape)!=1:
raise ValueError("expected vector")
m,n = A.shape
if m != b.shape[0]:
raise ValueError("incompatible dimensions")
w = zeros((n,), dtype=double)
zz = zeros((m,), dtype=double)
index=zeros((n,), dtype=int)
x,rnorm,mode = _nnls.nnls(A,m,n,b,w,zz,index)
if mode != 1:
raise RuntimeError("too many iterations")
return x, rnorm
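if __name__ == "__main__":
    # Minimal usage sketch with illustrative data. The unconstrained
    # least-squares solution is already nonnegative here, so nnls recovers it.
    from numpy import array
    A = array([[1., 0.], [1., 1.], [0., 1.]])
    b = array([2., 1., 1.])
    x, rnorm = nnls(A, b)
    print(x)      # approx [ 1.333  0.333 ]
    print(rnorm)  # approx 1.155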
|
dinhoabreu/yowsup | refs/heads/master | yowsup/env/__init__.py | 39 | from .env_android import AndroidYowsupEnv
from .env_s40 import S40YowsupEnv
CURRENT_ENV = S40YowsupEnv()
|
fernandezcuesta/ansible | refs/heads/devel | lib/ansible/modules/network/cloudengine/ce_snmp_contact.py | 39 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.0'}
DOCUMENTATION = '''
---
module: ce_snmp_contact
version_added: "2.4"
short_description: Manages SNMP contact configuration on HUAWEI CloudEngine switches.
description:
- Manages SNMP contact configurations on HUAWEI CloudEngine switches.
author:
- wangdezhuang (@CloudEngine-Ansible)
options:
contact:
description:
- Contact information.
required: true
default: null
state:
description:
- Manage the state of the resource.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- name: CloudEngine snmp contact test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Config SNMP contact"
ce_snmp_contact:
state: present
contact: call Operator at 010-99999999
provider: "{{ cli }}"
- name: "Undo SNMP contact"
ce_snmp_contact:
state: absent
contact: call Operator at 010-99999999
provider: "{{ cli }}"
'''
RETURN = '''
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"contact": "call Operator at 010-99999999",
"state": "present"}
existing:
description: k/v pairs of existing aaa server
returned: always
type: dict
sample: {}
end_state:
description: k/v pairs of aaa params after module execution
returned: always
type: dict
sample: {"contact": "call Operator at 010-99999999"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["snmp-agent sys-info contact call Operator at 010-99999999"]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ce import get_config, load_config, ce_argument_spec
class SnmpContact(object):
""" Manages SNMP contact configuration """
def __init__(self, **kwargs):
""" Class init """
# module
argument_spec = kwargs["argument_spec"]
self.spec = argument_spec
self.module = AnsibleModule(argument_spec=self.spec, supports_check_mode=True)
# config
self.cur_cfg = dict()
# module args
self.state = self.module.params['state']
self.contact = self.module.params['contact']
# state
self.changed = False
self.updates_cmd = list()
self.results = dict()
self.proposed = dict()
self.existing = dict()
self.end_state = dict()
def check_args(self):
""" Check invalid args """
if self.contact:
if len(self.contact) > 255 or len(self.contact) < 1:
self.module.fail_json(
msg='Error: The len of contact %s is out of [1 - 255].' % self.contact)
else:
self.module.fail_json(
msg='Error: The len of contact is 0.')
def get_proposed(self):
""" Get proposed state """
self.proposed["state"] = self.state
if self.contact:
self.proposed["contact"] = self.contact
def get_existing(self):
""" Get existing state """
tmp_cfg = self.cli_get_config()
if tmp_cfg:
temp_data = tmp_cfg.split(r"contact ")
self.cur_cfg["contact"] = temp_data[1]
self.existing["contact"] = temp_data[1]
def get_end_state(self):
""" Get end state """
tmp_cfg = self.cli_get_config()
if tmp_cfg:
temp_data = tmp_cfg.split(r"contact ")
self.end_state["contact"] = temp_data[1]
def cli_load_config(self, commands):
""" Load configure by cli """
if not self.module.check_mode:
load_config(self.module, commands)
def cli_get_config(self):
""" Get configure by cli """
regular = "| include snmp | include contact"
flags = list()
flags.append(regular)
tmp_cfg = get_config(self.module, flags)
return tmp_cfg
def set_config(self):
""" Set configure by cli """
cmd = "snmp-agent sys-info contact %s" % self.contact
self.updates_cmd.append(cmd)
cmds = list()
cmds.append(cmd)
self.cli_load_config(cmds)
self.changed = True
def undo_config(self):
""" Undo configure by cli """
cmd = "undo snmp-agent sys-info contact"
self.updates_cmd.append(cmd)
cmds = list()
cmds.append(cmd)
self.cli_load_config(cmds)
self.changed = True
def work(self):
""" Main work function """
self.check_args()
self.get_proposed()
self.get_existing()
if self.state == "present":
if "contact" in self.cur_cfg.keys() and self.contact == self.cur_cfg["contact"]:
pass
else:
self.set_config()
else:
if "contact" in self.cur_cfg.keys() and self.contact == self.cur_cfg["contact"]:
self.undo_config()
self.get_end_state()
self.results['changed'] = self.changed
self.results['proposed'] = self.proposed
self.results['existing'] = self.existing
self.results['end_state'] = self.end_state
self.results['updates'] = self.updates_cmd
self.module.exit_json(**self.results)
def main():
""" Module main """
argument_spec = dict(
state=dict(choices=['present', 'absent'], default='present'),
contact=dict(type='str', required=True)
)
argument_spec.update(ce_argument_spec)
module = SnmpContact(argument_spec=argument_spec)
module.work()
if __name__ == '__main__':
main()
|
Lab41/pelops | refs/heads/master | pelops/training/utils.py | 3 | from pelops.datasets.chip import ChipDataset
from pelops.utils import SetType
import json
import os
import os.path
import numpy as np
from keras.preprocessing import image
from keras.applications.resnet50 import preprocess_input
import random
def tuple_to_string(tup):
"""Convert a tuple (or other iterable, we are not picky) to a string.
Args:
tup (tuple): An iterable full of items on which str() works.
Returns:
str: A string of all elements of the tuple, joined with underscores.
"""
return "_".join(str(i) for i in tup)
def attributes_to_classes(chip_dataset, chip_key_maker):
"""Extract a set of attributes from a set of Chips and uses them to make
unique classses.
The chip_key_maker is a function (or other callable) with the following
signature:
chip_key_maker(chip) -> string
It returns a string derived from the chip. All chips that output the same
string will be considered as part of the same class for training. An
example key_maker might do the following:
chip_key_maker(chip) -> "honda_civic"
    The strings will be sorted before an index is assigned, so that the
    first string alphabetically will have index 0. This increases the
    reproducibility of the dictionary, as only changes to the set of classes,
    not to the order of the chips, will change the dictionary.
Args:
chip_dataset: A ChipDataset, or other iterable of Chips
chip_key_maker: A function that takes a chip and returns a string
derived from the chip.
Returns:
dict: a dictionary mapping the output of chip_key_maker(chip) to a
class number.
"""
# First we get all the keys
keys = set()
for chip in chip_dataset:
# Get the class from the specified attributes
key = chip_key_maker(chip)
keys.add(key)
class_to_index = {}
for index, key in enumerate(sorted(keys)):
class_to_index[key] = index
return class_to_index
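# Minimal usage sketch (hypothetical chips; any object with a ``misc`` dict
# attribute works with the key makers defined below):
#
#     from collections import namedtuple
#     FakeChip = namedtuple("FakeChip", ["misc"])
#     chips = [FakeChip({"make": "honda", "model": "civic"}),
#              FakeChip({"make": "ford", "model": "focus"})]
#     attributes_to_classes(chips, key_make_model)
#     # -> {"ford_focus": 0, "honda_civic": 1} (keys are sorted first)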
def key_make_model(chip):
""" Given a chip, return make and model string.
Make and model are extracted from chip.misc using the keys "make" and
"model". If they are missing it returns None for that value. If misc
missing or not a dictionary, (None, None) is returned.
Args:
chip: A chip named tuple
Returns:
string: "make_model" from the chip. The string "None" may be returned
for one of the positions (or both) if it is missing in the chip.
"""
output = [None, None]
# Ensure we have a misc dictionary
if hasattr(chip, "misc"):
misc = chip.misc
if hasattr(misc, "get"):
output[0] = misc.get("make", None)
output[1] = misc.get("model", None)
return tuple_to_string(output)
def key_color(chip):
""" Given a chip, returns the color as a string.
Color is extracted from chip.misc using the key "color". If it is missing
    or misc is not a dictionary, str(None) is returned.
Args:
chip: A chip named tuple
Returns:
str: color from the chip. str(None) if not defined, or misc is
missing.
"""
output = [None]
# Ensure we have a misc dictionary
if hasattr(chip, "misc"):
misc = chip.misc
# Get the color
if hasattr(misc, "get"):
output[0] = misc.get("color", None)
return tuple_to_string(output)
def key_make_model_color(chip):
""" Given a chip, returns the make, model, and color as a string.
Color is extracted from chip.misc using the keys "make, "model", and
"color". If misc missing or not a dictionary, "None_None_None" is returned.
Args:
chip: A chip named tuple
Returns:
str: "make_model_color" from the chip. str(None) may be returned for
one of the positions (or any number of them) if it is missing in
the chip.
"""
make_model = key_make_model(chip)
color = key_color(chip)
return "_".join((make_model, color))
class KerasDirectory(object):
def __init__(self, chip_dataset, chip_key_maker):
""" Takes a ChipDataset and hard links the files to custom defined
class directories.
Args:
chip_dataset: A ChipDataset, or other iterable of Chips
chip_key_maker: A callable that takes a chip and returns a string
representing the attributes in that chip that you care about.
For example, you might write a function to return the make and
model, or color.
"""
# Set up internal variables
self.__chip_dataset = chip_dataset
self.__chip_key_maker = chip_key_maker
# Class setup functions
self.__set_root_dir()
# Set up the class to index mapping
self.__class_to_index = attributes_to_classes(
self.__chip_dataset,
self.__chip_key_maker,
)
print(self.__class_to_index)
def __set_root_dir(self):
""" Set the root directory for the classes based on the SetType.
If self.__chip_dataset.set_type exists, it will be used to
set the root directory name, otherwise it will default to
"all".
The final directory will be:
output_directory / root / class_number / image
"""
ROOTMAP = {
SetType.ALL.value: "all",
SetType.QUERY.value: "query",
SetType.TEST.value: "test",
SetType.TRAIN.value: "train",
}
# We write a train, test, query, or all directory as the root depending
# on the ChipDataset.
self.root = "all"
try:
set_type = self.__chip_dataset.set_type
except AttributeError:
return
try:
key = set_type.value
except AttributeError:
return
try:
self.root = ROOTMAP[set_type.value]
except KeyError:
return
def write_links(self, output_directory, root=None, write_map=True):
""" Writes links to a directory.
The final directory will be:
output_directory / root / class_number / image
Where root is set by self.__set_root_dir() and is based on
the SetType, but you can reset it by passing in root.
Args:
output_directory (str): The location to write the files to, it must
already exist.
root (str, Defaults to None): A base directory to create in the
output_directory, under which all further directories will be
written. If not specified, the class will choose between
"test", "train", "query", and "all" depending on the `SetType`
of the `chip_dataset`. If you would like no directory, use a
blank string "".
write_map (bool, defaults to True): If true, writes a JSON file of
the self.__class_to_index map.
"""
# Override root with self.root if not set
if root is None:
root = self.root
# Write a Class to number map JSON
if write_map:
map_dir = os.path.join(output_directory, root)
os.makedirs(map_dir, exist_ok=True)
self.write_map(map_dir)
# Link chips
for chip in self.__chip_dataset:
src = chip.filepath
filename = os.path.basename(src)
chip_class = self.__chip_key_maker(chip)
chip_index = self.__class_to_index[chip_class]
dest_dir = os.path.join(output_directory, root, str(chip_index))
os.makedirs(dest_dir, exist_ok=True)
dst = os.path.join(dest_dir, filename)
os.symlink(src=src, dst=dst)
def write_map(self, output_directory, filename="class_to_index_map.json"):
"""Write the class_to_index map to a JSON file.
Args:
output_directory (str): The location to write the map to.
filename (str, defaults to class_to_index_map.json): the filename
to save the JSON file to.
"""
full_path = os.path.join(output_directory, filename)
with open(full_path, "w") as open_file:
json.dump(self.__class_to_index, open_file, indent=2)
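# Minimal usage sketch (assumes ``dataset`` is a ChipDataset on disk and the
# output directory already exists; the path is illustrative):
#
#     kd = KerasDirectory(dataset, key_make_model)
#     kd.write_links("/tmp/keras_data")
#     # -> /tmp/keras_data/<set_type>/<class_index>/<image> symlinks, plus
#     #    class_to_index_map.json under /tmp/keras_data/<set_type>/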
def rgb2bgr(x):
"""
given an array representation of an RGB image, change the image
    into a BGR representation of the image
"""
return(bgr2rgb(x))
def bgr2rgb(x):
"""
    given an array representation of a BGR image, change the image
    into an RGB representation of the image
"""
y = np.zeros(x.shape)
B = x[:,:,0]
G = x[:,:,1]
R = x[:,:,2]
y[:,:,0] = R
y[:,:,1] = G
y[:,:,2] = B
return y
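# A shorter equivalent sketch for 3-channel arrays: reversing the channel
# axis performs the same swap in one step (note it preserves the input dtype,
# whereas the explicit copy above always returns a float array):
#
#     y = x[:, :, ::-1]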
# load an image from disk
# NOTE: input assumed to be RGB
# NOTE: output is to be BGR for resnet use.
def load_image(img_path,
e_dims=False,
image_flip=0.5,
image_shift=0.10,
image_rotate_degrees=10,
image_zoom=0.1,
output_BGR=True):
"""
    WARNING: this function should only be used to manipulate images meant
    for ResNet50 consumption.
    To make it applicable to other environments, remove preprocess_input.
    Does some image manipulation.
    image input is assumed to be in RGB format
    output format defaults to BGR unless output_BGR is set to False
    e_dims = False will output (x,y,3) sized images
        True will output (1,x,y,3) sized images
    image_flip = probability that the image will be flipped right to left
    image_shift = percent of image to randomly shift up/down & right/left
    image_rotate_degrees = rotate image randomly
        between [-image_rotate_degrees, image_rotate_degrees]
    image_zoom = randomly zoom image [1-image_zoom, 1+image_zoom]
    output_BGR = True -> image output will be in BGR format, RGB otherwise
"""
img = image.load_img(img_path, target_size=(224, 224))
my_img = image.img_to_array(img)
if image_flip is not None:
if image_flip > 1 or image_flip < -1:
raise ValueError('|image_flip:{0}| > 1'.format(image_flip))
image_flip = abs(image_flip)
if random.random() > image_flip :
my_img = image.flip_axis(my_img, axis=1)
if image_rotate_degrees is not None:
image_rotate_degrees = int(image_rotate_degrees)
if image_rotate_degrees > 360:
image_rotate_degrees = image_rotate_degrees % 360
my_img = image.random_rotation(my_img,
image_rotate_degrees,
row_index=0,
col_index=1,
channel_index=2)
if image_shift is not None:
if image_shift > 1 or image_shift < -1:
raise ValueError('|image_shift:{0}| > 1'.format(image_shift))
image_shift = abs(image_shift)
my_img = image.random_shift(my_img,
image_shift,
image_shift,
row_index=0,
col_index=1,
channel_index=2)
if image_zoom is not None:
if image_zoom > 1 or image_zoom < -1:
raise ValueError('|image_zoom:{0}| > 1'.format(image_zoom))
image_zoom = abs(image_zoom)
low = 1-image_zoom
high = 1+image_zoom
rng = [low,high]
my_img = image.random_zoom(my_img,
rng,
row_index=0,
col_index=1,
channel_index=2)
if not output_BGR:
my_img = bgr2rgb(my_img)
my_img = np.expand_dims(my_img, axis=0)
my_img = preprocess_input(my_img)
if not e_dims:
my_img = my_img.squeeze()
return my_img
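# Minimal usage sketch (hypothetical image path; augmentation is random, so
# repeated calls return different arrays):
#
#     arr = load_image("car.jpg", e_dims=True)  # shape (1, 224, 224, 3), BGR
#     batch = np.vstack([load_image("car.jpg", e_dims=True)
#                        for _ in range(8)])    # simple augmented batch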
|
vheon/YouCompleteMe | refs/heads/master | python/ycm/tests/server_test.py | 3 | # Copyright (C) 2016 YouCompleteMe contributors
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import * # noqa
from ycm.test_utils import MockVimModule
MockVimModule()
import requests
import time
from ycm.client.base_request import BaseRequest
from ycm.youcompleteme import YouCompleteMe
from ycmd import user_options_store
# The default options which are only relevant to the client, not the server and
# thus are not part of default_options.json, but are required for a working
# YouCompleteMe or OmniCompleter object.
DEFAULT_CLIENT_OPTIONS = {
'server_log_level': 'info',
'extra_conf_vim_data': [],
'show_diagnostics_ui': 1,
'enable_diagnostic_signs': 1,
'enable_diagnostic_highlighting': 0,
'always_populate_location_list': 0,
}
def MakeUserOptions( custom_options = {} ):
options = dict( user_options_store.DefaultOptions() )
options.update( DEFAULT_CLIENT_OPTIONS )
options.update( custom_options )
return options
class Server_test():
def _IsReady( self ):
return BaseRequest.GetDataFromHandler( 'ready' )
def _WaitUntilReady( self, timeout = 5 ):
total_slept = 0
while True:
try:
if total_slept > timeout:
raise RuntimeError( 'Waited for the server to be ready '
'for {0} seconds, aborting.'.format(
timeout ) )
if self._IsReady():
return
except requests.exceptions.ConnectionError:
pass
finally:
time.sleep( 0.1 )
total_slept += 0.1
def setUp( self ):
self._server_state = YouCompleteMe( MakeUserOptions() )
self._WaitUntilReady()
def tearDown( self ):
self._server_state.OnVimLeave()
|
iancarv/EPBuildSystem | refs/heads/master | testes/tester.py | 2 | from testes.teste import Teste, FalhouNoTeste
class Tester(object):
def __init__(self):
teste = Teste()
self.testes = [teste]
def test(self):
for teste in self.testes:
try:
teste.run()
except FalhouNoTeste:
                print('The test failed! =(')
                return False
        print('The test passed successfully!')
return True |
diegocepedaw/oncall | refs/heads/master | src/oncall/api/v0/team_admins.py | 1 | # Copyright (c) LinkedIn Corporation. All rights reserved. Licensed under the BSD-2 Clause license.
# See LICENSE in the project root for license information.
from urllib.parse import unquote
from falcon import HTTPError, HTTP_201, HTTPBadRequest
from ujson import dumps as json_dumps
from ... import db
from .users import get_user_data
from ...auth import login_required, check_team_auth
from ...utils import load_json_body, subscribe_notifications, create_audit
from ...constants import ADMIN_CREATED
def on_get(req, resp, team):
"""
Get list of admin usernames for a team
**Example request**
.. sourcecode:: http
GET /api/v0/teams/team-foo/admins HTTP/1.1
Host: example.com
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
[
"jdoe",
"asmith"
]
"""
team = unquote(team)
connection = db.connect()
cursor = connection.cursor()
cursor.execute('''SELECT `user`.`name` FROM `user`
JOIN `team_admin` ON `team_admin`.`user_id`=`user`.`id`
JOIN `team` ON `team`.`id`=`team_admin`.`team_id`
WHERE `team`.`name`=%s''',
team)
data = [r[0] for r in cursor]
cursor.close()
connection.close()
resp.body = json_dumps(data)
@login_required
def on_post(req, resp, team):
"""
Add user as a team admin. Responds with that user's info (similar to user GET).
Subscribes this user to default notifications for the team, and adds the user
to the team (if needed).
**Example request**
.. sourcecode:: http
POST /api/v0/teams/team-foo/admins HTTP/1.1
Host: example.com
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"active": 1,
"contacts": {
"call": "+1 111-111-1111",
"email": "jdoe@example.com",
"im": "jdoe",
"sms": "+1 111-111-1111"
},
"full_name": "John Doe",
"id": 9535,
"name": "jdoe",
"photo_url": "image.example.com",
"time_zone": "US/Pacific"
}
:statuscode 201: Successful admin added
:statuscode 400: Missing name attribute in request
:statuscode 422: Invalid team/user, or user is already a team admin
"""
team = unquote(team)
check_team_auth(team, req)
data = load_json_body(req)
user_name = data.get('name')
if not user_name:
raise HTTPBadRequest('name attribute missing from request')
connection = db.connect()
cursor = connection.cursor()
cursor.execute('''(SELECT `id` FROM `team` WHERE `name`=%s)
UNION ALL
(SELECT `id` FROM `user` WHERE `name`=%s)''', (team, user_name))
results = [r[0] for r in cursor]
if len(results) < 2:
raise HTTPError('422 Unprocessable Entity', 'IntegrityError', 'invalid team or user')
(team_id, user_id) = results
try:
# also make sure user is in the team
cursor.execute('''INSERT IGNORE INTO `team_user` (`team_id`, `user_id`) VALUES (%r, %r)''',
(team_id, user_id))
cursor.execute('''INSERT INTO `team_admin` (`team_id`, `user_id`) VALUES (%r, %r)''',
(team_id, user_id))
# subscribe user to team notifications
subscribe_notifications(team, user_name, cursor)
create_audit({'user': user_name}, team, ADMIN_CREATED, req, cursor)
connection.commit()
except db.IntegrityError as e:
err_msg = str(e.args[1])
if err_msg == "Column 'team_id' cannot be null":
err_msg = 'team %s not found' % team
if err_msg == "Column 'user_id' cannot be null":
err_msg = 'user %s not found' % data['name']
else:
err_msg = 'user name "%s" is already an admin of team %s' % (data['name'], team)
raise HTTPError('422 Unprocessable Entity', 'IntegrityError', err_msg)
finally:
cursor.close()
connection.close()
resp.status = HTTP_201
resp.body = json_dumps(get_user_data(None, {'name': user_name})[0])
|
DDEFISHER/servo | refs/heads/master | tests/wpt/web-platform-tests/fetch/api/resources/method.py | 161 | def main(request, response):
headers = []
if "cors" in request.GET:
headers.append(("Access-Control-Allow-Origin", "*"))
headers.append(("Access-Control-Allow-Credentials", "true"))
headers.append(("Access-Control-Allow-Methods", "GET, POST, PUT, FOO"))
headers.append(("Access-Control-Allow-Headers", "x-test, x-foo"))
headers.append(("Access-Control-Expose-Headers", "x-request-method"))
headers.append(("x-request-method", request.method))
return headers, request.body
|
csuttles/utils | refs/heads/master | python/todo-api/flask/lib/python2.7/site-packages/wheel/signatures/ed25519py.py | 565 | # -*- coding: utf-8 -*-
import warnings
import os
from collections import namedtuple
from . import djbec
__all__ = ['crypto_sign', 'crypto_sign_open', 'crypto_sign_keypair', 'Keypair',
'PUBLICKEYBYTES', 'SECRETKEYBYTES', 'SIGNATUREBYTES']
PUBLICKEYBYTES=32
SECRETKEYBYTES=64
SIGNATUREBYTES=64
Keypair = namedtuple('Keypair', ('vk', 'sk')) # verifying key, secret key
def crypto_sign_keypair(seed=None):
"""Return (verifying, secret) key from a given seed, or os.urandom(32)"""
if seed is None:
seed = os.urandom(PUBLICKEYBYTES)
else:
warnings.warn("ed25519ll should choose random seed.",
RuntimeWarning)
if len(seed) != 32:
raise ValueError("seed must be 32 random bytes or None.")
skbytes = seed
vkbytes = djbec.publickey(skbytes)
return Keypair(vkbytes, skbytes+vkbytes)
def crypto_sign(msg, sk):
"""Return signature+message given message and secret key.
The signature is the first SIGNATUREBYTES bytes of the return value.
A copy of msg is in the remainder."""
if len(sk) != SECRETKEYBYTES:
raise ValueError("Bad signing key length %d" % len(sk))
vkbytes = sk[PUBLICKEYBYTES:]
skbytes = sk[:PUBLICKEYBYTES]
sig = djbec.signature(msg, skbytes, vkbytes)
return sig + msg
def crypto_sign_open(signed, vk):
"""Return message given signature+message and the verifying key."""
if len(vk) != PUBLICKEYBYTES:
raise ValueError("Bad verifying key length %d" % len(vk))
rc = djbec.checkvalid(signed[:SIGNATUREBYTES], signed[SIGNATUREBYTES:], vk)
if not rc:
raise ValueError("rc != True", rc)
return signed[SIGNATUREBYTES:]
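if __name__ == "__main__":
    # Minimal round-trip sketch: generate a keypair, sign a message, then
    # verify it. crypto_sign_open raises ValueError on a bad signature.
    vk, sk = crypto_sign_keypair()
    signed = crypto_sign(b"attack at dawn", sk)
    assert crypto_sign_open(signed, vk) == b"attack at dawn"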
|
mogoweb/webkit_for_android5.1 | refs/heads/master | webkit/Tools/Scripts/webkitpy/common/prettypatch.py | 140 | # Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import tempfile
class PrettyPatch(object):
# FIXME: PrettyPatch should not require checkout_root.
def __init__(self, executive, checkout_root):
self._executive = executive
self._checkout_root = checkout_root
def pretty_diff_file(self, diff):
# Diffs can contain multiple text files of different encodings
# so we always deal with them as byte arrays, not unicode strings.
assert(isinstance(diff, str))
pretty_diff = self.pretty_diff(diff)
diff_file = tempfile.NamedTemporaryFile(suffix=".html")
diff_file.write(pretty_diff)
diff_file.flush()
return diff_file
def pretty_diff(self, diff):
        # prettify.rb will hang forever if given no input.
# Avoid the hang by returning an empty string.
if not diff:
return ""
pretty_patch_path = os.path.join(self._checkout_root,
"Websites", "bugs.webkit.org",
"PrettyPatch")
prettify_path = os.path.join(pretty_patch_path, "prettify.rb")
args = [
"ruby",
"-I",
pretty_patch_path,
prettify_path,
]
# PrettyPatch does not modify the encoding of the diff output
# so we can't expect it to be utf-8.
return self._executive.run_command(args, input=diff, decode_output=False)
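# Minimal usage sketch (assumes a webkitpy Executive and a WebKit checkout;
# the import path and checkout location are assumptions):
#
#     from webkitpy.common.system.executive import Executive
#     pretty = PrettyPatch(Executive(), "/path/to/WebKit")
#     html = pretty.pretty_diff(diff_bytes)  # diff_bytes is a byte string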
|
ReachingOut/unisubs | refs/heads/staging | apps/webdriver_testing/check_teams/test_dashboard.py | 5 | # -*- coding: utf-8 -*-
import datetime
import os
from subtitles import pipeline
from django.core import management
from utils.factories import *
from webdriver_testing.webdriver_base import WebdriverTestCase
from webdriver_testing import data_helpers
from webdriver_testing.pages.site_pages import editor_page
from webdriver_testing.pages.site_pages import site_modals
from webdriver_testing.pages.site_pages.teams import dashboard_tab
from webdriver_testing.pages.site_pages.teams import tasks_tab
from webdriver_testing.data_factories import TeamLangPrefFactory
from webdriver_testing.data_factories import UserLangFactory
class TestCaseTaskFreeDashboard(WebdriverTestCase):
"""Test suite for display of Team dashboard when there are no tasks. """
NEW_BROWSER_PER_TEST_CASE = False
@classmethod
def setUpClass(cls):
super(TestCaseTaskFreeDashboard, cls).setUpClass()
cls.data_utils = data_helpers.DataHelpers()
cls.dashboard_tab = dashboard_tab.DashboardTab(cls)
cls.admin = UserFactory()
cls.manager = UserFactory()
cls.member = UserFactory()
cls.team = TeamFactory(admin=cls.admin,
manager=cls.manager,
member=cls.member)
cls.en_video = VideoFactory(primary_audio_language_code='en')
TeamVideoFactory(team=cls.team, video=cls.en_video)
cls.fr_video = VideoFactory(primary_audio_language_code='fr')
TeamVideoFactory(team=cls.team, video=cls.fr_video)
cls.video = TeamVideoFactory(team=cls.team).video
pipeline.add_subtitles(cls.en_video, 'en', SubtitleSetFactory(),
complete=True)
pipeline.add_subtitles(cls.fr_video, 'fr', SubtitleSetFactory(),
complete=True)
cls.polly_glott = TeamMemberFactory(
team = cls.team,
).user
polly_speaks = ['en', 'cs', 'ru', 'ar']
for lang in polly_speaks:
UserLangFactory(user = cls.polly_glott,
language = lang)
def setUp(self):
self.dashboard_tab.open_team_page(self.team.slug)
def test_members_generic_create_subs(self):
"""Dashboard displays generic create subs message when no orig lang specified.
"""
#Create a user that's a member of a team with language preferences set.
#Login user and go to team dashboard page
self.dashboard_tab.log_in(self.member.username, 'password')
#Verify expected videos are displayed.
self.dashboard_tab.open_team_page(self.team.slug)
langs = self.dashboard_tab.languages_needed(self.video.title)
self.assertEqual(['Create Subtitles'], langs)
def test_members_no_languages(self):
"""Dashboard displays Create Subtitles when member has no langs specified.
"""
#Create a user that's a member of a team with language preferences set.
#Login user and go to team dashboard page
self.dashboard_tab.log_in(self.member.username, 'password')
#Verify expected videos are displayed.
self.dashboard_tab.open_team_page(self.team.slug)
langs = self.dashboard_tab.languages_needed(self.en_video.title)
self.assertEqual(['Create Subtitles'], langs)
def test_members_specific_langs_needed(self):
"""Dashboard displays videos matching members language preferences.
"""
#Login user and go to team dashboard page
self.dashboard_tab.log_in(self.polly_glott.username, 'password')
#Verify expected videos are displayed.
expected_lang_list = ['Create Czech Subtitles',
'Create Russian Subtitles',
'Create Arabic Subtitles']
self.dashboard_tab.open_team_page(self.team.slug)
langs = self.dashboard_tab.languages_needed(self.en_video.title)
self.assertEqual(sorted(langs), sorted(expected_lang_list))
def test_add_suggestion_displayed(self):
"""Add videos link displays for user with permissions, when no videos found.
"""
test_team = TeamFactory(admin = self.admin,
video_policy=2)
self.dashboard_tab.log_in(self.admin.username, 'password')
self.dashboard_tab.open_team_page(test_team.slug)
self.assertTrue(self.dashboard_tab.suggestion_present(suggestion_type='add'))
def test_add_suggestion_not_displayed(self):
"""Add videos link not displayed for user with no permissions, when no videos
found.
"""
test_team = TeamFactory(admin = self.admin,
member = self.member,
video_policy=2)
self.dashboard_tab.log_in(self.member.username, 'password')
self.dashboard_tab.open_team_page(test_team.slug)
self.assertFalse(self.dashboard_tab.suggestion_present(suggestion_type='add'))
def test_lang_suggestion_displayed(self):
"""Update preferred languages displayed, when no videos found.
"""
test_team = TeamFactory(admin = self.admin,
member = self.member)
self.dashboard_tab.log_in(self.member.username, 'password')
self.dashboard_tab.open_team_page(test_team.slug)
self.assertTrue(self.dashboard_tab.suggestion_present(
suggestion_type='language'))
def test_browse_suggestion_displayed(self):
"""Browse videos link displayed, when no videos found.
"""
test_team = TeamFactory(admin = self.admin,
member = self.member)
self.dashboard_tab.log_in(self.member.username, 'password')
self.dashboard_tab.open_team_page(test_team.slug)
self.assertTrue(self.dashboard_tab.suggestion_present(
suggestion_type='browse'))
def test_no_create_nonmember(self):
"""Non-members see dashboard videos without the option to create subtitles.
"""
non_member = UserFactory()
self.dashboard_tab.log_in(non_member.username, 'password')
self.dashboard_tab.open_team_page(self.team.slug)
langs = self.dashboard_tab.languages_needed(self.en_video.title)
self.assertEqual(langs, None)
def test_no_create_guest(self):
"""Guests see dashboard videos without the option to create subtitles.
"""
self.dashboard_tab.log_out()
self.dashboard_tab.open_team_page(self.team.slug)
langs = self.dashboard_tab.languages_needed(self.en_video.title)
self.assertEqual(langs, None)
class TestCaseTasksEnabledDashboard(WebdriverTestCase):
"""Verify team dashboard displays for teams with tasks enabled.
"""
NEW_BROWSER_PER_TEST_CASE = False
@classmethod
def setUpClass(cls):
super(TestCaseTasksEnabledDashboard, cls).setUpClass()
cls.data_utils = data_helpers.DataHelpers()
cls.editor_pg = editor_page.EditorPage(cls)
cls.modal = site_modals.SiteModals(cls)
cls.dashboard_tab = dashboard_tab.DashboardTab(cls)
cls.tasks_tab = tasks_tab.TasksTab(cls)
cls.admin = UserFactory()
cls.manager = UserFactory()
cls.member = UserFactory()
cls.team = TeamFactory(admin=cls.admin,
manager=cls.manager,
member=cls.member,
workflow_enabled=True)
cls.team_workflow = WorkflowFactory(team = cls.team,
autocreate_subtitle=True,
autocreate_translate=True,
review_allowed = 10)
langs = ['en', 'ru', 'pt-br', 'fr', 'de', 'es']
for lc in langs:
TeamLangPrefFactory(team = cls.team,
language_code = lc,
preferred = True)
cls.en_video = VideoFactory(primary_audio_language_code='en')
TeamVideoFactory(team=cls.team, video=cls.en_video)
cls.video = TeamVideoFactory(team=cls.team).video
cls.polly_glott = TeamMemberFactory(
team = cls.team,
).user
polly_speaks = ['en', 'cs', 'ru', 'ar']
for lang in polly_speaks:
UserLangFactory(user = cls.polly_glott,
language = lang)
def setUp(self):
super(TestCaseTasksEnabledDashboard, self).setUp()
self.dashboard_tab.open_team_page(self.team.slug)
def test_members_assigned_tasks(self):
"""Members see “Videos you're working on” with assigned languages.
"""
fr_video = VideoFactory(primary_audio_language_code='fr')
tv = TeamVideoFactory(team=self.team, video=fr_video)
task = list(tv.task_set.incomplete_subtitle().filter(language='fr'))[0]
task.assignee = self.polly_glott
task.save()
#Login user and go to team dashboard page
self.dashboard_tab.log_in(self.polly_glott.username, 'password')
#Verify expected videos are displayed.
self.dashboard_tab.open_team_page(self.team.slug)
self.assertTrue(self.dashboard_tab.dash_task_present(
task_type='Create French subtitles',
title=fr_video.title))
def test_manage_your_tasks_link(self):
"""manage your tasks link opens with correct filter defaults. """
ar_video = VideoFactory(primary_audio_language_code='ar')
tv = TeamVideoFactory(team=self.team, video=ar_video)
task = list(tv.task_set.incomplete_subtitle().filter(language='ar'))[0]
task.assignee = self.polly_glott
task.save()
self.dashboard_tab.log_in(self.polly_glott.username, 'password')
#Verify expected videos are displayed.
self.dashboard_tab.open_team_page(self.team.slug)
self.dashboard_tab.manage_tasks()
self.assertIn("?assignee=me&lang=all", self.tasks_tab.current_url())
self.assertTrue(self.tasks_tab.task_present('Transcribe Arabic Subtitles',
ar_video.title))
def test_members_available_tasks(self):
"""Members see “Videos that need your help” with the relevant tasks.
"""
#Login user and go to team dashboard page
self.dashboard_tab.log_out()
video = VideoFactory(primary_audio_language_code='en')
tv = TeamVideoFactory(team=self.team, video=video)
self.dashboard_tab.log_in(self.polly_glott.username, 'password')
#Verify expected videos are displayed.
self.dashboard_tab.open_team_page(self.team.slug)
expected_lang_list = ['Create English subtitles']
langs = self.dashboard_tab.languages_needed(video.title)
self.assertEqual(sorted(langs), sorted(expected_lang_list))
def test_no_langs_available_tasks(self):
"""Members with no lang prefs the list of available tasks in English.
"""
video = VideoFactory(primary_audio_language_code='en')
tv = TeamVideoFactory(team=self.team, video=video)
#Login user and go to team dashboard page
self.dashboard_tab.log_in(self.member.username, 'password')
self.dashboard_tab.open_team_page(self.team.slug)
expected_lang_list = ['Create English subtitles']
langs = self.dashboard_tab.languages_needed(video.title)
self.assertEqual(sorted(langs), sorted(expected_lang_list))
def test_start_translation_multi(self):
"""Translation starts from dropdown lines and times from reference lang.
"""
self.logger.info('setup: Setting task policy to all team members')
self.team.task_assign_policy=20
self.team.video_policy=1
self.team.save()
en_video = VideoFactory(primary_audio_language_code='en')
pipeline.add_subtitles(en_video, 'en',
SubtitleSetFactory(), complete=True)
tv = TeamVideoFactory(team=self.team, video=en_video)
#Login user and go to team dashboard page
self.dashboard_tab.log_in(self.polly_glott.username, 'password')
self.dashboard_tab.open_team_page(self.team.slug)
self.dashboard_tab.click_lang_task(en_video.title,
'Create Russian subtitles')
self.assertEqual(u'Editing Russian\u2026', self.editor_pg.working_language())
self.assertEqual('English (original)', self.editor_pg.selected_ref_language())
self.assertEqual(self.editor_pg.start_times(),
self.editor_pg.reference_times())
self.editor_pg.exit()
def test_start_subtitles_audio_known(self):
"""Start subtitles when primary audio lang known.
"""
#Login user and go to team dashboard page
video = VideoFactory(primary_audio_language_code='cs')
tv = TeamVideoFactory(team=self.team, video=video)
self.dashboard_tab.log_in(self.polly_glott.username, 'password')
self.dashboard_tab.open_team_page(self.team.slug)
self.dashboard_tab.click_lang_task(video.title,
'Create Czech subtitles')
self.assertEqual(u'Editing Czech\u2026', self.editor_pg.working_language())
self.editor_pg.exit()
def test_start_subtitles_audio_unknown(self):
"""Start subtitles when primary audio not set.
"""
#Login user and go to team dashboard page
video = TeamVideoFactory(team=self.team).video
self.dashboard_tab.log_in(self.polly_glott.username, 'password')
self.dashboard_tab.open_team_page(self.team.slug)
self.dashboard_tab.click_lang_task(video.title,
'Create subtitles')
self.modal.add_language('French', 'French')
self.assertEqual(u'Editing French\u2026', self.editor_pg.working_language())
self.editor_pg.exit()
def test_start_review(self):
"""Member starts review from any task in “Videos that need your help”.
"""
self.team_workflow.review_allowed = 10
self.team_workflow.save()
self.team.task_assign_policy=20
self.team.video_policy=1
self.team.save()
video = VideoFactory(primary_audio_language_code='en')
tv = TeamVideoFactory(team=self.team, video=video)
pipeline.add_subtitles(video, 'en', SubtitleSetFactory(),
complete=True, committer=self.polly_glott)
#Login as reviewer and start the review task.
self.dashboard_tab.log_in(self.admin.username, 'password')
self.dashboard_tab.open_team_page(self.team.slug)
self.logger.info("Clicking the Review English subtitles task")
self.dashboard_tab.click_lang_task(video.title, 'Review English subtitles')
self.assertTrue(self.editor_pg.collab_panel_displayed())
self.dashboard_tab.open_team_page(self.team.slug)
self.dashboard_tab.handle_js_alert("accept")
def test_member_language_suggestion(self):
"""Members with no lang pref see the prompt to set language preference.
"""
self.dashboard_tab.open_team_page(self.team.slug)
self.dashboard_tab.log_in(self.member.username, 'password')
self.dashboard_tab.open_team_page(self.team.slug)
self.assertTrue(self.dashboard_tab.suggestion_present(
suggestion_type='authed_language'))
|
zoeyangyy/event-extraction | refs/heads/master | bishi/toutiao1.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Time : 2018/5/12 上午10:04
# @Author : Zoe
# @File : toutiao1.py
# @Description :
import re
line = input().strip()
article = ''
try:
while line:
article += line
line = input().strip()
except EOFError:
pass
print(article)
pattern = re.compile(r'//.+')
result1 = pattern.findall(article)
print(result1)
pattern = re.compile(r'/\*.+/')
result2 = pattern.findall(article)
print(result2)
pattern = re.compile(r'\".*/\*.+\*/.*\"')
result3 = pattern.findall(article)
pattern = re.compile(r'\".*//.*\"')
result4 = pattern.findall(article)
print(len(result1)+len(result2)-len(result3)-len(result4))
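# Counting logic: result1 holds //-style comments, result2 /*...*/ blocks;
# result3 and result4 subtract comment-like text that actually sits inside
# string literals. Note the input lines are concatenated without newlines,
# so multi-line /*...*/ blocks collapse into a single line before matching.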
# //f dfs//
# /*fsd*/ /*f fil
# dfs*/
#
# dfsf
# dsf = "/*gf */"
# fdg |
alshedivat/tensorflow | refs/heads/master | tensorflow/python/estimator/keras.py | 35 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""keras python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.python.estimator import keras
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
keras.__all__ = [s for s in dir(keras) if not s.startswith('__')]
from tensorflow_estimator.python.estimator.keras import *
|
s-gv/rnicu-webapp | refs/heads/master | stream-plot/src/galry/managers/mesh_manager.py | 2 | from galry import NavigationEventProcessor, InteractionManager, \
PaintManager, \
GridEventProcessor, scale_matrix, rotation_matrix, translation_matrix, \
MeshNavigationEventProcessor
from .default_manager import DefaultPaintManager, DefaultInteractionManager, \
DefaultBindings
from .plot_manager import PlotBindings
import numpy as np
def load_mesh(filename):
"""Load vertices and faces from a wavefront .obj file and generate
normals.
"""
data = np.genfromtxt(filename, dtype=[('type', np.character, 1),
('points', np.float32, 3)])
# Get vertices and faces
vertices = data['points'][data['type'] == 'v']
faces = (data['points'][data['type'] == 'f']-1).astype(np.uint32)
# Build normals
T = vertices[faces]
N = np.cross(T[:,1]-T[:,0], T[:,2]-T[:,0])
L = np.sqrt(N[:,0]**2+N[:,1]**2+N[:,2]**2)
N /= L[:, np.newaxis]
normals = np.zeros(vertices.shape)
normals[faces[:,0]] += N
normals[faces[:,1]] += N
normals[faces[:,2]] += N
L = np.sqrt(normals[:,0]**2+normals[:,1]**2+normals[:,2]**2)
normals /= L[:, np.newaxis]
# Scale vertices such that object is contained in [-1:+1,-1:+1,-1:+1]
vmin, vmax = vertices.min(), vertices.max()
vertices = 2*(vertices-vmin)/(vmax-vmin) - 1
return vertices, normals, faces
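# Usage sketch (the .obj path here is an assumption, not bundled data):
# vertices, normals, faces = load_mesh('meshes/bunny.obj')
# vertices and normals are float arrays of shape (n, 3), with vertices
# rescaled into [-1, 1]; faces is a uint32 array of triangle indices.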
class MeshInteractionManager(DefaultInteractionManager):
def initialize_default(self, constrain_navigation=None):
super(MeshInteractionManager, self).initialize_default()
self.add_processor(MeshNavigationEventProcessor, name='navigation')
self.add_processor(GridEventProcessor, name='grid')
class MeshPaintManager(DefaultPaintManager):
def initialize_default(self, *args, **kwargs):
super(MeshPaintManager, self).initialize_default(*args, **kwargs)
self.set_rendering_options(activate3D=True)
class MeshBindings(PlotBindings):
def initialize(self):
super(MeshBindings, self).initialize()
self.set_rotation_mouse()
self.set_rotation_keyboard()
def set_panning_mouse(self):
# Panning: left button mouse (the Control modifier is currently disabled)
self.set('LeftClickMove', 'Pan',
# key_modifier='Control',
param_getter=lambda p: (-4*p["mouse_position_diff"][0],
-4*p["mouse_position_diff"][1]))
def set_rotation_mouse(self):
# Rotation: middle button mouse, or CTRL + left button mouse
self.set('MiddleClickMove', 'Rotation',
param_getter=lambda p: (3*p["mouse_position_diff"][0],
3*p["mouse_position_diff"][1]))
self.set('LeftClickMove', 'Rotation',
key_modifier='Control',
param_getter=lambda p: (3*p["mouse_position_diff"][0],
3*p["mouse_position_diff"][1]))
def set_rotation_keyboard(self):
"""Set zooming bindings with the keyboard."""
# Rotation: ALT + key arrows
self.set('KeyPress', 'Rotation',
key='Left', key_modifier='Shift',
param_getter=lambda p: (-.25, 0))
self.set('KeyPress', 'Rotation',
key='Right', key_modifier='Shift',
param_getter=lambda p: (.25, 0))
self.set('KeyPress', 'Rotation',
key='Up', key_modifier='Shift',
param_getter=lambda p: (0, .25))
self.set('KeyPress', 'Rotation',
key='Down', key_modifier='Shift',
param_getter=lambda p: (0, -.25))
def set_zoombox_mouse(self):
"""Deactivate zoombox."""
pass
def set_zoombox_keyboard(self):
"""Deactivate zoombox."""
pass
def extend(self):
"""Set rotation interactions with mouse and keyboard."""
self.set_rotation_mouse()
self.set_rotation_keyboard()
|
kirbyfan64/shedskin | refs/heads/master | examples/ac_encode.py | 6 | ## Arithmetic coding compressor and uncompressor for binary source.
## This is a cleaned-up version of AEncode.py
## (c) David MacKay - Free software. License: GPL
import os
BETA0=1;BETA1=1 ## default prior distribution
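## Fixed-point setup: the unit interval [0,1) is mapped onto the integers
## [0,ONE) using M bits of precision; HALF, QUARTER and THREEQU are the
## rescaling thresholds used by encode and decode below.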
M = 30 ; ONE = (1<<M) ; HALF = (1<<(M-1))
QUARTER = (1<<(M-2)) ; THREEQU = HALF+QUARTER
def clear (c,charstack):
## print out character c, and other queued characters
a = str(c) + str(1-c)*charstack[0]
charstack[0]=0
return a
pass
def encode (string, c0=BETA0, c1=BETA1, adaptive=1,verbose=0):
b=ONE; a=0; tot0=0;tot1=0; assert c0>0; assert c1>0
if adaptive==0:
p0 = c0*1.0/(c0+c1)
pass
ans="";
charstack=[0] ## how many undecided characters remain to print
for c in string:
w=b-a
if adaptive :
cT = c0+c1
p0 = c0*1.0/cT
pass
boundary = a + int(p0*w)
if (boundary == a): boundary += 1; print "warningA"; pass # these warnings mean that some of the probabilities
if (boundary == b): boundary -= 1; print "warningB"; pass # requested by the probabilistic model
## are so small (compared to our integers) that we had to round them up to bigger values
if (c=='1') :
a = boundary
tot1 += 1
if adaptive: c1 += 1.0 ; pass
elif (c=='0'):
b = boundary
tot0 +=1
if adaptive: c0 += 1.0 ; pass
pass ## ignore other characters
while ( (a>=HALF) or (b<=HALF) ) : ## output bits
if (a>=HALF) :
ans = ans + clear(1,charstack)
a = a-HALF ;
b = b-HALF ;
else :
ans = ans + clear(0,charstack)
pass
a *= 2 ; b *= 2
pass
assert a<=HALF; assert b>=HALF; assert a>=0; assert b<=ONE
## if the gap a-b is getting small, rescale it
while ( (a>QUARTER) and (b<THREEQU) ):
charstack[0] += 1
a = 2*a-HALF
b = 2*b-HALF
pass
assert a<=HALF; assert b>=HALF; assert a>=0; assert b<=ONE
pass
# terminate
if ( (HALF-a) > (b-HALF) ) :
w = (HALF-a) ;
ans = ans + clear(0,charstack)
while ( w < HALF ) :
ans = ans + clear(1,charstack)
w *=2
pass
pass
else :
w = (b-HALF) ;
ans = ans + clear(1,charstack)
while ( w < HALF ) :
ans = ans + clear(0,charstack)
w *=2
pass
pass
return ans
pass
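## Round-trip sketch (illustrative; mirrors test() below):
## e = encode('0110', 10, 1) # the compressed bit string
## assert decode(e, 4, 10, 1) == '0110'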
def decode (string, N=10000, c0=BETA0, c1=BETA1, adaptive=1,verbose=0):
## must supply N, the number of source characters remaining.
b=ONE ; a=0 ; tot0=0;tot1=0 ; assert c0>0 ; assert c1>0
model_needs_updating = 1
if adaptive==0:
p0 = c0*1.0/(c0+c1)
pass
ans=""
u=0 ; v=ONE
for c in string :
if N<=0 :
break ## break out of the string-reading loop
assert N>0
## // (u,v) is the current "encoded alphabet" binary interval, and halfway is its midpoint.
## // (a,b) is the current "source alphabet" interval, and boundary is the "midpoint"
assert u>=0 ; assert v<=ONE
halfway = u + (v-u)/2
if( c == '1' ) :
u = halfway
elif ( c=='0' ):
v = halfway
else:
pass
## // Read bits until we can decide what the source symbol was.
## // Then emulate the encoder's computations, and tie (u,v) to tag along for the ride.
while (1): ## condition at end
firsttime = 0
if(model_needs_updating):
w = b-a
if adaptive :
cT = c0 + c1 ; p0 = c0 *1.0/cT
pass
boundary = a + int(p0*w)
if (boundary == a): boundary += 1; print "warningA"; pass
if (boundary == b): boundary -= 1; print "warningB"; pass
model_needs_updating = 0
pass
if ( boundary <= u ) :
ans = ans + "1"; tot1 +=1
if adaptive: c1 += 1.0 ; pass
a = boundary ; model_needs_updating = 1 ; N-=1
elif ( boundary >= v ) :
ans = ans + "0"; tot0 +=1
if adaptive: c0 += 1.0 ; pass
b = boundary ; model_needs_updating = 1 ; N-=1
## // every time we discover a source bit, implement exactly the
## // computations that were done by the encoder (below).
else :
## // not enough bits have yet been read to know the decision.
pass
## // emulate outputting of bits by the encoder, and tie (u,v) to tag along for the ride.
while ( (a>=HALF) or (b<=HALF) ) :
if (a>=HALF) :
a = a-HALF ; b = b-HALF ; u = u-HALF ; v = v-HALF
pass
else :
pass
a *= 2 ; b *= 2 ; u *= 2 ; v *= 2 ;
model_needs_updating = 1
pass
assert a<=HALF; assert b>=HALF; assert a>=0; assert b<=ONE
## if the gap a-b is getting small, rescale it
while ( (a>QUARTER) and (b<THREEQU) ):
a = 2*a-HALF; b = 2*b-HALF ; u = 2*u-HALF ; v = 2*v-HALF
pass
if not (N>0 and model_needs_updating) : ## this is the "while" for this "do" loop
break
pass
pass
return ans
pass
def hardertest():
print "Reading the BentCoinFile"
inputfile = open( "testdata/BentCoinFile" , "r" )
outputfile = open( "tmp.zip" , "w" )
print "Compressing to tmp.zip"
s = inputfile.read()
N = len(s)
zip = encode(s, 10, 1)
outputfile.write(zip)
outputfile.close(); inputfile.close()
print "DONE compressing"
inputfile = open( "tmp.zip" , "r" )
outputfile = open( "tmp2" , "w" )
print "Uncompressing to tmp2"
unc = decode(list(inputfile.read()), N, 10, 1)
outputfile.write(unc)
outputfile.close(); inputfile.close()
print "DONE uncompressing"
print "Checking for differences..."
os.system( "diff testdata/BentCoinFile tmp2" )
os.system( "wc tmp.zip testdata/BentCoinFile tmp2" )
def test():
sl=["1010", "111", "00001000000000000000",\
"1", "10" , "01" , "0" ,"0000000", \
"000000000000000100000000000000000000000000000000100000000000000000011000000" ]
for s in sl:
print "encoding", s
N=len(s)
e = encode(s,10,1)
print "decoding", e
ds = decode(e,N,10,1)
print ds
if (ds != s) :
print s
print "ERR@"
pass
else:
print "ok ---------- "
pass
pass
if __name__ == '__main__':
test()
hardertest()
|
Myasuka/scikit-learn | refs/heads/master | doc/tutorial/text_analytics/skeletons/exercise_01_language_train_model.py | 254 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
# TASK: Fit the pipeline on the training set
# TASK: Predict the outcome on the testing set in a variable named y_predicted
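# One possible fill-in for the TASKs above -- a sketch, not the official
# solution; the char-analyzer settings below are assumptions:
vectorizer = TfidfVectorizer(analyzer='char', ngram_range=(1, 3))
clf = Pipeline([('vec', vectorizer), ('clf', Perceptron())])
clf.fit(docs_train, y_train)
y_predicted = clf.predict(docs_test)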
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
|
navnm/green-hacker | refs/heads/master | examples/swift_rockstar.py | 20 | from rockstar import RockStar
swift_code = "println('Hello world')"
rock_it_bro = RockStar(days=400, file_name='hello.swift', code=swift_code)
rock_it_bro.make_me_a_rockstar()
|
keerts/home-assistant | refs/heads/dev | tests/components/light/__init__.py | 104 | """The tests for Light platforms."""
|
chdecultot/frappe | refs/heads/develop | frappe/patches/v4_1/file_manager_fix.py | 16 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals, print_function
import frappe
import os
from frappe.utils.file_manager import get_content_hash, get_file, get_file_name
from frappe.utils import get_files_path, get_site_path
# The files missed by the previous patch might have been replaced with new files
# with the same filename
#
# This patch does the following,
# * Detect which files were replaced and rename them with name{hash:5}.extn and
# update filedata record for the new file
#
# * make missing_files.txt in site dir with files that should be recovered from
# a backup from a time before version 3 migration
#
# * Patch remaining unpatched File records.
from six import iteritems
def execute():
frappe.db.auto_commit_on_many_writes = True
rename_replacing_files()
for name, file_name, file_url in frappe.db.sql(
"""select name, file_name, file_url from `tabFile`
where ifnull(file_name, '')!='' and ifnull(content_hash, '')=''"""):
b = frappe.get_doc('File', name)
old_file_name = b.file_name
b.file_name = os.path.basename(old_file_name)
if old_file_name.startswith('files/') or old_file_name.startswith('/files/'):
b.file_url = os.path.normpath('/' + old_file_name)
else:
b.file_url = os.path.normpath('/files/' + old_file_name)
try:
_file_name, content = get_file(name)
b.content_hash = get_content_hash(content)
except IOError:
print('Warning: Error processing ', name)
b.content_hash = None
b.flags.ignore_duplicate_entry_error = True
b.save()
frappe.db.auto_commit_on_many_writes = False
def get_replaced_files():
ret = []
new_files = dict(frappe.db.sql("select name, file_name from `tabFile` where file_name not like 'files/%'"))
old_files = dict(frappe.db.sql("select name, file_name from `tabFile` where ifnull(content_hash, '')=''"))
invfiles = invert_dict(new_files)
for nname, nfilename in iteritems(new_files):
if 'files/' + nfilename in old_files.values():
ret.append((nfilename, invfiles[nfilename]))
return ret
def rename_replacing_files():
replaced_files = get_replaced_files()
if len(replaced_files):
missing_files = [v[0] for v in replaced_files]
with open(get_site_path('missing_files.txt'), 'w') as f:
f.write(('\n'.join(missing_files) + '\n').encode('utf-8'))
for file_name, file_datas in replaced_files:
print ('processing ' + file_name)
content_hash = frappe.db.get_value('File', file_datas[0], 'content_hash')
if not content_hash:
continue
new_file_name = get_file_name(file_name, content_hash)
if os.path.exists(get_files_path(new_file_name)):
print('skipping ' + file_name)
continue
try:
os.rename(get_files_path(file_name), get_files_path(new_file_name))
except OSError:
print('Error renaming ', file_name)
for name in file_datas:
f = frappe.get_doc('File', name)
f.file_name = new_file_name
f.file_url = '/files/' + new_file_name
f.save()
def invert_dict(ddict):
ret = {}
for k,v in iteritems(ddict):
if not ret.get(v):
ret[v] = [k]
else:
ret[v].append(k)
return ret
def get_file_name(fname, hash):
if '.' in fname:
partial, extn = fname.rsplit('.', 1)
else:
partial = fname
extn = ''
return '{partial}{suffix}.{extn}'.format(partial=partial, extn=extn, suffix=hash[:5])
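# Illustrative example of the renaming scheme (made-up values):
# get_file_name('photo.jpg', 'a1b2c3d4e5') -> 'photoa1b2c.jpg'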
|
odoousers2014/addons-yelizariev | refs/heads/8.0 | res_users_signature_hr/__openerp__.py | 7 | {
'name': "Signature templates for user emails (HR)",
'version': '1.0.0',
'author': 'Ivan Yelizariev',
'category': 'Custom',
'website': 'https://yelizariev.github.io',
'depends': ['res_users_signature', 'hr'],
'data': [
'views.xml',
],
'installable': True,
'auto_install': True,
}
|
nicobustillos/odoo | refs/heads/8.0 | addons/mrp/report/workcenter_load.py | 437 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.report.render import render
from openerp.report.interface import report_int
import time
from datetime import date, datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp.report.misc import choice_colors
import StringIO
from pychart import *
theme.use_color = 1
#
# TODO: Bad code, seems buggy, TO CHECK !
#
class external_pdf(render):
def __init__(self, pdf):
render.__init__(self)
self.pdf = pdf
self.output_type='pdf'
def _render(self):
return self.pdf
class report_custom(report_int):
def _compute_dates(self, time_unit, start, stop):
if not stop:
stop = start
if time_unit == 'month':
dates = {}
a = int(start.split("-")[0])*12 + int(start.split("-")[1])
z = int(stop.split("-")[0])*12 + int(stop.split("-")[1]) + 1
for i in range(a,z):
year = i/12
month = i%12
if month == 0:
year -= 1
month = 12
months = {1:"January",2:"February",3:"March",4:"April",5:"May",6:"June",7:"July",8:"August",9:"September",10:"October",11:"November",12:"December"}
dates[i] = {
'name' :months[month],
'start':(datetime(year, month, 2) + relativedelta(day=1)).strftime('%Y-%m-%d'),
'stop' :(datetime(year, month, 2) + relativedelta(day=31)).strftime('%Y-%m-%d'),
}
return dates
elif time_unit == 'week':
dates = {}
start_week = date(int(start.split("-")[0]),int(start.split("-")[1]),int(start.split("-")[2])).isocalendar()
end_week = date(int(stop.split("-")[0]),int(stop.split("-")[1]),int(stop.split("-")[2])).isocalendar()
a = int(start.split("-")[0])*52 + start_week[1]
z = int(stop.split("-")[0])*52 + end_week[1]
for i in range(a,z+1):
year = i/52
week = i%52
d = date(year, 1, 1)
dates[i] = {
'name' :"Week #%d" % week,
'start':(d + timedelta(days=-d.weekday(), weeks=week)).strftime('%Y-%m-%d'),
'stop' :(d + timedelta(days=6-d.weekday(), weeks=week)).strftime('%Y-%m-%d'),
}
return dates
else: # time_unit = day
dates = {}
a = datetime(int(start.split("-")[0]),int(start.split("-")[1]),int(start.split("-")[2]))
z = datetime(int(stop.split("-")[0]),int(stop.split("-")[1]),int(stop.split("-")[2]))
i = a
while i <= z:
dates[map(int,i.strftime('%Y%m%d').split())[0]] = {
'name' :i.strftime('%Y-%m-%d'),
'start':i.strftime('%Y-%m-%d'),
'stop' :i.strftime('%Y-%m-%d'),
}
i = i + relativedelta(days=+1)
return dates
return {}
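# Illustrative call: _compute_dates('month', '2014-01-15', '2014-02-10')
# returns {24169: {'name': 'January', 'start': '2014-01-01', 'stop': '2014-01-31'},
# 24170: {'name': 'February', 'start': '2014-02-01', 'stop': '2014-02-28'}}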
def create(self, cr, uid, ids, datas, context=None):
assert len(ids), 'You should provide some ids!'
colors = choice_colors(len(ids))
cr.execute(
"SELECT MAX(mrp_production.date_planned) AS stop,MIN(mrp_production.date_planned) AS start "\
"FROM mrp_workcenter, mrp_production, mrp_production_workcenter_line "\
"WHERE mrp_production_workcenter_line.production_id=mrp_production.id "\
"AND mrp_production_workcenter_line.workcenter_id=mrp_workcenter.id "\
"AND mrp_production.state NOT IN ('cancel','done') "\
"AND mrp_workcenter.id IN %s",(tuple(ids),))
res = cr.dictfetchone()
if not res['stop']:
res['stop'] = time.strftime('%Y-%m-%d %H:%M:%S')
if not res['start']:
res['start'] = time.strftime('%Y-%m-%d %H:%M:%S')
dates = self._compute_dates(datas['form']['time_unit'], res['start'][:10], res['stop'][:10])
dates_list = dates.keys()
dates_list.sort()
x_index = []
for date in dates_list:
x_index.append((dates[date]['name'], date))
pdf_string = StringIO.StringIO()
can = canvas.init(fname=pdf_string, format='pdf')
can.set_title("Work Center Loads")
chart_object.set_defaults(line_plot.T, line_style=None)
if datas['form']['measure_unit'] == 'cycles':
y_label = "Load (Cycles)"
else:
y_label = "Load (Hours)"
# For add the report header on the top of the report.
tb = text_box.T(loc=(300, 500), text="/hL/15/bWork Center Loads", line_style=None)
tb.draw()
ar = area.T(legend = legend.T(),
x_grid_style = line_style.gray70_dash1,
x_axis = axis.X(label="Periods", format="/a90/hC%s"),
x_coord = category_coord.T(x_index, 0),
y_axis = axis.Y(label=y_label),
y_range = (0, None),
size = (640,480))
bar_plot.fill_styles.reset();
# select workcenters
cr.execute(
"SELECT mw.id, rs.name FROM mrp_workcenter mw, resource_resource rs " \
"WHERE mw.id IN %s and mw.resource_id=rs.id " \
"ORDER BY mw.id" ,(tuple(ids),))
workcenters = cr.dictfetchall()
data = []
for date in dates_list:
vals = []
for workcenter in workcenters:
cr.execute("SELECT SUM(mrp_production_workcenter_line.hour) AS hours, SUM(mrp_production_workcenter_line.cycle) AS cycles, \
resource_resource.name AS name, mrp_workcenter.id AS id \
FROM mrp_production_workcenter_line, mrp_production, mrp_workcenter, resource_resource \
WHERE (mrp_production_workcenter_line.production_id=mrp_production.id) \
AND (mrp_production_workcenter_line.workcenter_id=mrp_workcenter.id) \
AND (mrp_workcenter.resource_id=resource_resource.id) \
AND (mrp_workcenter.id=%s) \
AND (mrp_production.date_planned BETWEEN %s AND %s) \
GROUP BY mrp_production_workcenter_line.workcenter_id, resource_resource.name, mrp_workcenter.id \
ORDER BY mrp_workcenter.id", (workcenter['id'], dates[date]['start'] + ' 00:00:00', dates[date]['stop'] + ' 23:59:59'))
res = cr.dictfetchall()
if not res:
vals.append(0.0)
else:
if datas['form']['measure_unit'] == 'cycles':
vals.append(res[0]['cycles'] or 0.0)
else:
vals.append(res[0]['hours'] or 0.0)
toto = [dates[date]['name']]
for val in vals:
toto.append(val)
data.append(toto)
workcenter_num = 0
for workcenter in workcenters:
f = fill_style.Plain()
f.bgcolor = colors[workcenter_num]
ar.add_plot(bar_plot.T(label=workcenter['name'], data=data, fill_style=f, hcol=workcenter_num+1, cluster=(workcenter_num, len(workcenters))))
workcenter_num += 1
if (not data) or (len(data[0]) <= 1):
ar = self._empty_graph(time.strftime('%Y-%m-%d'))
ar.draw(can)
# close canvas so that the file is written to "disk"
can.close()
self.obj = external_pdf(pdf_string.getvalue())
self.obj.render()
pdf_string.close()
return (self.obj.pdf, 'pdf')
def _empty_graph(self, date):
data = [[date, 0]]
ar = area.T(x_coord = category_coord.T(data, 0), y_range = (0, None),
x_axis = axis.X(label="Periods"),
y_axis = axis.Y(label="Load"))
ar.add_plot(bar_plot.T(data = data, label="No production order"))
return ar
report_custom('report.mrp.workcenter.load')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
mKeRix/home-assistant | refs/heads/dev | tests/components/hue/test_device_trigger.py | 12 | """The tests for Philips Hue device triggers."""
import pytest
from homeassistant.components import hue
import homeassistant.components.automation as automation
from homeassistant.components.hue import device_trigger
from homeassistant.setup import async_setup_component
from .conftest import setup_bridge_for_sensors as setup_bridge
from .test_sensor_base import HUE_DIMMER_REMOTE_1, HUE_TAP_REMOTE_1
from tests.common import (
assert_lists_same,
async_get_device_automations,
async_mock_service,
mock_device_registry,
)
REMOTES_RESPONSE = {"7": HUE_TAP_REMOTE_1, "8": HUE_DIMMER_REMOTE_1}
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_get_triggers(hass, mock_bridge, device_reg):
"""Test we get the expected triggers from a hue remote."""
mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE)
await setup_bridge(hass, mock_bridge)
assert len(mock_bridge.mock_requests) == 1
# 2 remotes, just 1 battery sensor
assert len(hass.states.async_all()) == 1
# Get triggers for specific tap switch
hue_tap_device = device_reg.async_get_device(
{(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={}
)
triggers = await async_get_device_automations(hass, "trigger", hue_tap_device.id)
expected_triggers = [
{
"platform": "device",
"domain": hue.DOMAIN,
"device_id": hue_tap_device.id,
"type": t_type,
"subtype": t_subtype,
}
for t_type, t_subtype in device_trigger.HUE_TAP_REMOTE.keys()
]
assert_lists_same(triggers, expected_triggers)
# Get triggers for specific dimmer switch
hue_dimmer_device = device_reg.async_get_device(
{(hue.DOMAIN, "00:17:88:01:10:3e:3a:dc")}, connections={}
)
triggers = await async_get_device_automations(hass, "trigger", hue_dimmer_device.id)
trigger_batt = {
"platform": "device",
"domain": "sensor",
"device_id": hue_dimmer_device.id,
"type": "battery_level",
"entity_id": "sensor.hue_dimmer_switch_1_battery_level",
}
expected_triggers = [
trigger_batt,
*[
{
"platform": "device",
"domain": hue.DOMAIN,
"device_id": hue_dimmer_device.id,
"type": t_type,
"subtype": t_subtype,
}
for t_type, t_subtype in device_trigger.HUE_DIMMER_REMOTE.keys()
],
]
assert_lists_same(triggers, expected_triggers)
async def test_if_fires_on_state_change(hass, mock_bridge, device_reg, calls):
"""Test for button press trigger firing."""
mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE)
await setup_bridge(hass, mock_bridge)
assert len(mock_bridge.mock_requests) == 1
assert len(hass.states.async_all()) == 1
# Set an automation with a specific tap switch trigger
hue_tap_device = device_reg.async_get_device(
{(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={}
)
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": hue.DOMAIN,
"device_id": hue_tap_device.id,
"type": "remote_button_short_press",
"subtype": "button_4",
},
"action": {
"service": "test.automation",
"data_template": {
"some": "B4 - {{ trigger.event.data.event }}"
},
},
},
{
"trigger": {
"platform": "device",
"domain": hue.DOMAIN,
"device_id": "mock-device-id",
"type": "remote_button_short_press",
"subtype": "button_1",
},
"action": {
"service": "test.automation",
"data_template": {
"some": "B1 - {{ trigger.event.data.event }}"
},
},
},
]
},
)
# Fake that the remote is being pressed.
new_sensor_response = dict(REMOTES_RESPONSE)
new_sensor_response["7"]["state"] = {
"buttonevent": 18,
"lastupdated": "2019-12-28T22:58:02",
}
mock_bridge.mock_sensor_responses.append(new_sensor_response)
# Force updates to run again
await mock_bridge.sensor_manager.coordinator.async_refresh()
await hass.async_block_till_done()
assert len(mock_bridge.mock_requests) == 2
assert len(calls) == 1
assert calls[0].data["some"] == "B4 - 18"
# Fake another button press.
new_sensor_response = dict(REMOTES_RESPONSE)
new_sensor_response["7"]["state"] = {
"buttonevent": 34,
"lastupdated": "2019-12-28T22:58:05",
}
mock_bridge.mock_sensor_responses.append(new_sensor_response)
# Force updates to run again
await mock_bridge.sensor_manager.coordinator.async_refresh()
await hass.async_block_till_done()
assert len(mock_bridge.mock_requests) == 3
assert len(calls) == 1
|
Plexxi/st2 | refs/heads/master | st2tests/st2tests/mocks/action.py | 3 | # Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Mock classes for use in pack testing.
"""
from __future__ import absolute_import
from logging import RootLogger
from mock import Mock
from python_runner.python_action_wrapper import ActionService
from st2tests.mocks.datastore import MockDatastoreService
__all__ = ["MockActionWrapper", "MockActionService"]
class MockActionWrapper(object):
def __init__(self, pack, class_name):
self._pack = pack
self._class_name = class_name
class MockActionService(ActionService):
"""
Mock ActionService for use in testing.
"""
def __init__(self, action_wrapper):
self._action_wrapper = action_wrapper
# Holds a mock logger instance
# We use a Mock class so users can assert that the logger was called with particular arguments
self._logger = Mock(spec=RootLogger)
self._datastore_service = MockDatastoreService(
logger=self._logger,
pack_name=self._action_wrapper._pack,
class_name=self._action_wrapper._class_name,
)
@property
def datastore_service(self):
return self._datastore_service
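# Usage sketch in a pack test (pack and class names are placeholders):
# action_service = MockActionService(MockActionWrapper('my_pack', 'MyAction'))
# action = MyAction(config={}, action_service=action_service)
# action_service._logger.info.assert_called_with('some message')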
|
Tinkerforge/brickv | refs/heads/master | src/brickv/plugin_system/plugins/red/program_info_octave.py | 1 | # -*- coding: utf-8 -*-
"""
RED Plugin
Copyright (C) 2014-2015 Matthias Bolte <matthias@tinkerforge.com>
program_info_octave.py: Program Octave Info Widget
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the
Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.
"""
from brickv.plugin_system.plugins.red.program_info import ProgramInfo
from brickv.plugin_system.plugins.red.program_utils import Constants
from brickv.plugin_system.plugins.red.ui_program_info_octave import Ui_ProgramInfoOctave
class ProgramInfoOctave(ProgramInfo, Ui_ProgramInfoOctave):
def __init__(self, context):
ProgramInfo.__init__(self, context)
self.setupUi(self)
self.check_show_advanced_options.stateChanged.connect(self.update_ui_state)
# overrides ProgramInfo.update_ui_state
def update_ui_state(self):
show_advanced_options = self.check_show_advanced_options.isChecked()
# version
def cb_octave_versions(versions):
for version in versions:
if version.executable == self.program.executable:
self.label_version.setText(version.version)
return
self.label_version.setText('<unknown>')
self.get_executable_versions('octave', cb_octave_versions)
# start mode
start_mode_api_name = self.program.cast_custom_option_value('octave.start_mode', str, '<unknown>')
start_mode = Constants.get_octave_start_mode(start_mode_api_name)
start_mode_script_file = start_mode == Constants.OCTAVE_START_MODE_SCRIPT_FILE
self.label_start_mode.setText(Constants.octave_start_mode_display_names[start_mode])
# script file
self.label_script_file_title.setVisible(start_mode_script_file)
self.label_script_file.setVisible(start_mode_script_file)
self.label_script_file.setText(self.program.cast_custom_option_value('octave.script_file', str, '<unknown>'))
# working directory
self.label_working_directory_title.setVisible(show_advanced_options)
self.label_working_directory.setVisible(show_advanced_options)
self.label_working_directory.setText(self.program.working_directory)
# options
self.label_options_title.setVisible(show_advanced_options)
self.label_options.setVisible(show_advanced_options)
self.label_options.setText('\n'.join(self.program.cast_custom_option_value_list('octave.options', str, [])))
|
MarkTseng/django-farmersale | refs/heads/master | farmersale-env/lib/python2.7/site-packages/django/contrib/auth/tests/test_tokens.py | 117 | from datetime import date, timedelta
import sys
import unittest
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.test import TestCase
@skipIfCustomUser
class TokenGeneratorTest(TestCase):
def test_make_token(self):
"""
Ensure that we can make a token and that it is valid
"""
user = User.objects.create_user('tokentestuser', 'test2@example.com', 'testpw')
p0 = PasswordResetTokenGenerator()
tk1 = p0.make_token(user)
self.assertTrue(p0.check_token(user, tk1))
def test_10265(self):
"""
Ensure that the token generated for a user created in the same request
will work correctly.
"""
# See ticket #10265
user = User.objects.create_user('comebackkid', 'test3@example.com', 'testpw')
p0 = PasswordResetTokenGenerator()
tk1 = p0.make_token(user)
reload = User.objects.get(username='comebackkid')
tk2 = p0.make_token(reload)
self.assertEqual(tk1, tk2)
def test_timeout(self):
"""
Ensure we can use the token after n days, but no greater.
"""
# Uses a mocked version of PasswordResetTokenGenerator so we can change
# the value of 'today'
class Mocked(PasswordResetTokenGenerator):
def __init__(self, today):
self._today_val = today
def _today(self):
return self._today_val
user = User.objects.create_user('tokentestuser', 'test2@example.com', 'testpw')
p0 = PasswordResetTokenGenerator()
tk1 = p0.make_token(user)
p1 = Mocked(date.today() + timedelta(settings.PASSWORD_RESET_TIMEOUT_DAYS))
self.assertTrue(p1.check_token(user, tk1))
p2 = Mocked(date.today() + timedelta(settings.PASSWORD_RESET_TIMEOUT_DAYS + 1))
self.assertFalse(p2.check_token(user, tk1))
@unittest.skipIf(sys.version_info[:2] >= (3, 0), "Unnecessary test with Python 3")
def test_date_length(self):
"""
Make sure we don't allow overly long dates, causing a potential DoS.
"""
user = User.objects.create_user('ima1337h4x0r', 'test4@example.com', 'p4ssw0rd')
p0 = PasswordResetTokenGenerator()
# This will put a 14-digit base36 timestamp into the token, which is too large.
self.assertRaises(ValueError,
p0._make_token_with_timestamp,
user, 175455491841851871349)
|
puckipedia/youtube-dl | refs/heads/master | youtube_dl/extractor/escapist.py | 98 | from __future__ import unicode_literals
import json
from .common import InfoExtractor
from ..compat import compat_urllib_request
from ..utils import (
determine_ext,
clean_html,
int_or_none,
float_or_none,
)
def _decrypt_config(key, string):
# The config blob is hex-encoded and XOR-obfuscated with the key.
a = ''
i = ''
r = ''
# Repeat the key until it covers half the hex string's length
# (one key character per decoded byte).
while len(a) < (len(string) / 2):
a += key
a = a[0:int(len(string) / 2)]
# Decode the hex string two characters at a time.
t = 0
while t < len(string):
i += chr(int(string[t] + string[t + 1], 16))
t += 2
# XOR each decoded byte with the corresponding key character.
icko = [s for s in i]
for t, c in enumerate(a):
r += chr(ord(c) ^ ord(icko[t]))
return r
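# XOR is symmetric, so the same routine also "encrypts" (illustrative):
# _decrypt_config('k', '0a0908') == 'abc'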
class EscapistIE(InfoExtractor):
_VALID_URL = r'https?://?(?:www\.)?escapistmagazine\.com/videos/view/[^/?#]+/(?P<id>[0-9]+)-[^/?#]*(?:$|[?#])'
_TESTS = [{
'url': 'http://www.escapistmagazine.com/videos/view/the-escapist-presents/6618-Breaking-Down-Baldurs-Gate',
'md5': 'ab3a706c681efca53f0a35f1415cf0d1',
'info_dict': {
'id': '6618',
'ext': 'mp4',
'description': "Baldur's Gate: Original, Modded or Enhanced Edition? I'll break down what you can expect from the new Baldur's Gate: Enhanced Edition.",
'title': "Breaking Down Baldur's Gate",
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 264,
'uploader': 'The Escapist',
}
}, {
'url': 'http://www.escapistmagazine.com/videos/view/zero-punctuation/10044-Evolve-One-vs-Multiplayer',
'md5': '9e8c437b0dbb0387d3bd3255ca77f6bf',
'info_dict': {
'id': '10044',
'ext': 'mp4',
'description': 'This week, Zero Punctuation reviews Evolve.',
'title': 'Evolve - One vs Multiplayer',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 304,
'uploader': 'The Escapist',
}
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
ims_video = self._parse_json(
self._search_regex(
r'imsVideo\.play\(({.+?})\);', webpage, 'imsVideo'),
video_id)
video_id = ims_video['videoID']
key = ims_video['hash']
config_req = compat_urllib_request.Request(
'http://www.escapistmagazine.com/videos/'
'vidconfig.php?videoID=%s&hash=%s' % (video_id, key))
config_req.add_header('Referer', url)
config = self._download_webpage(config_req, video_id, 'Downloading video config')
data = json.loads(_decrypt_config(key, config))
video_data = data['videoData']
title = clean_html(video_data['title'])
duration = float_or_none(video_data.get('duration'), 1000)
uploader = video_data.get('publisher')
formats = [{
'url': video['src'],
'format_id': '%s-%sp' % (determine_ext(video['src']), video['res']),
'height': int_or_none(video.get('res')),
} for video in data['files']['videos']]
self._sort_formats(formats)
return {
'id': video_id,
'formats': formats,
'title': title,
'thumbnail': self._og_search_thumbnail(webpage),
'description': self._og_search_description(webpage),
'duration': duration,
'uploader': uploader,
}
|
logicus4078/vertx-web | refs/heads/master | src/test/sockjs-protocol/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/poolmanager.py | 550 | # urllib3/poolmanager.py
# Copyright 2008-2014 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import logging
try: # Python 3
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
from ._collections import RecentlyUsedContainer
from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool
from .connectionpool import port_by_scheme
from .request import RequestMethods
from .util import parse_url
__all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url']
pool_classes_by_scheme = {
'http': HTTPConnectionPool,
'https': HTTPSConnectionPool,
}
log = logging.getLogger(__name__)
SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs',
'ssl_version')
class PoolManager(RequestMethods):
"""
Allows for arbitrary requests while transparently keeping track of
necessary connection pools for you.
:param num_pools:
Number of connection pools to cache before discarding the least
recently used pool.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param \**connection_pool_kw:
Additional parameters are used to create fresh
:class:`urllib3.connectionpool.ConnectionPool` instances.
Example: ::
>>> manager = PoolManager(num_pools=2)
>>> r = manager.request('GET', 'http://google.com/')
>>> r = manager.request('GET', 'http://google.com/mail')
>>> r = manager.request('GET', 'http://yahoo.com/')
>>> len(manager.pools)
2
"""
proxy = None
def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
RequestMethods.__init__(self, headers)
self.connection_pool_kw = connection_pool_kw
self.pools = RecentlyUsedContainer(num_pools,
dispose_func=lambda p: p.close())
def _new_pool(self, scheme, host, port):
"""
Create a new :class:`ConnectionPool` based on host, port and scheme.
This method is used to actually create the connection pools handed out
by :meth:`connection_from_url` and companion methods. It is intended
to be overridden for customization.
"""
pool_cls = pool_classes_by_scheme[scheme]
kwargs = self.connection_pool_kw
if scheme == 'http':
kwargs = self.connection_pool_kw.copy()
for kw in SSL_KEYWORDS:
kwargs.pop(kw, None)
return pool_cls(host, port, **kwargs)
def clear(self):
"""
Empty our store of pools and direct them all to close.
This will not affect in-flight connections, but they will not be
re-used after completion.
"""
self.pools.clear()
def connection_from_host(self, host, port=None, scheme='http'):
"""
Get a :class:`ConnectionPool` based on the host, port, and scheme.
If ``port`` isn't given, it will be derived from the ``scheme`` using
``urllib3.connectionpool.port_by_scheme``.
"""
scheme = scheme or 'http'
port = port or port_by_scheme.get(scheme, 80)
pool_key = (scheme, host, port)
with self.pools.lock:
# If the scheme, host, or port doesn't match existing open
# connections, open a new ConnectionPool.
pool = self.pools.get(pool_key)
if pool:
return pool
# Make a fresh ConnectionPool of the desired type
pool = self._new_pool(scheme, host, port)
self.pools[pool_key] = pool
return pool
def connection_from_url(self, url):
"""
Similar to :func:`urllib3.connectionpool.connection_from_url` but
doesn't pass any additional parameters to the
:class:`urllib3.connectionpool.ConnectionPool` constructor.
Additional parameters are taken from the :class:`.PoolManager`
constructor.
"""
u = parse_url(url)
return self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
def urlopen(self, method, url, redirect=True, **kw):
"""
Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen`
with custom cross-host redirect logic and only sends the request-uri
portion of the ``url``.
The given ``url`` parameter must be absolute, such that an appropriate
:class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
"""
u = parse_url(url)
conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
kw['assert_same_host'] = False
kw['redirect'] = False
if 'headers' not in kw:
kw['headers'] = self.headers
if self.proxy is not None and u.scheme == "http":
response = conn.urlopen(method, url, **kw)
else:
response = conn.urlopen(method, u.request_uri, **kw)
redirect_location = redirect and response.get_redirect_location()
if not redirect_location:
return response
# Support relative URLs for redirecting.
redirect_location = urljoin(url, redirect_location)
# RFC 2616, Section 10.3.4
if response.status == 303:
method = 'GET'
log.info("Redirecting %s -> %s" % (url, redirect_location))
kw['retries'] = kw.get('retries', 3) - 1 # Persist retries countdown
kw['redirect'] = redirect
return self.urlopen(method, redirect_location, **kw)
class ProxyManager(PoolManager):
"""
Behaves just like :class:`PoolManager`, but sends all requests through
the defined proxy, using the CONNECT method for HTTPS URLs.
:param proxy_url:
The URL of the proxy to be used.
:param proxy_headers:
A dictionary containing headers that will be sent to the proxy. In case
of HTTP they are being sent with each request, while in the
HTTPS/CONNECT case they are sent only once. Could be used for proxy
authentication.
Example:
>>> proxy = urllib3.ProxyManager('http://localhost:3128/')
>>> r1 = proxy.request('GET', 'http://google.com/')
>>> r2 = proxy.request('GET', 'http://httpbin.org/')
>>> len(proxy.pools)
1
>>> r3 = proxy.request('GET', 'https://httpbin.org/')
>>> r4 = proxy.request('GET', 'https://twitter.com/')
>>> len(proxy.pools)
3
"""
def __init__(self, proxy_url, num_pools=10, headers=None,
proxy_headers=None, **connection_pool_kw):
if isinstance(proxy_url, HTTPConnectionPool):
proxy_url = '%s://%s:%i' % (proxy_url.scheme, proxy_url.host,
proxy_url.port)
proxy = parse_url(proxy_url)
if not proxy.port:
port = port_by_scheme.get(proxy.scheme, 80)
proxy = proxy._replace(port=port)
self.proxy = proxy
self.proxy_headers = proxy_headers or {}
assert self.proxy.scheme in ("http", "https"), \
'Not supported proxy scheme %s' % self.proxy.scheme
connection_pool_kw['_proxy'] = self.proxy
connection_pool_kw['_proxy_headers'] = self.proxy_headers
super(ProxyManager, self).__init__(
num_pools, headers, **connection_pool_kw)
def connection_from_host(self, host, port=None, scheme='http'):
if scheme == "https":
return super(ProxyManager, self).connection_from_host(
host, port, scheme)
return super(ProxyManager, self).connection_from_host(
self.proxy.host, self.proxy.port, self.proxy.scheme)
def _set_proxy_headers(self, url, headers=None):
"""
Sets headers needed by proxies: specifically, the Accept and Host
headers. Only sets headers not provided by the user.
"""
headers_ = {'Accept': '*/*'}
netloc = parse_url(url).netloc
if netloc:
headers_['Host'] = netloc
if headers:
headers_.update(headers)
return headers_
def urlopen(self, method, url, redirect=True, **kw):
"Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
u = parse_url(url)
if u.scheme == "http":
# For proxied HTTPS requests, httplib sets the necessary headers
# on the CONNECT to the proxy. For HTTP, we'll definitely
# need to set 'Host' at the very least.
kw['headers'] = self._set_proxy_headers(url, kw.get('headers',
self.headers))
return super(ProxyManager, self).urlopen(method, url, redirect, **kw)
def proxy_from_url(url, **kw):
return ProxyManager(proxy_url=url, **kw)
|
eranchetz/nupic | refs/heads/master | external/linux32/lib/python2.6/site-packages/pylab.py | 105 | from matplotlib.pylab import *
import matplotlib.pylab
__doc__ = matplotlib.pylab.__doc__
|
damoguyan8844/TeamTalk | refs/heads/master | win-client/3rdParty/src/json/test/runjsontests.py | 175 | import sys
import os
import os.path
from glob import glob
import optparse
VALGRIND_CMD = 'valgrind --tool=memcheck --leak-check=yes --undef-value-errors=yes '
def compareOutputs( expected, actual, message ):
expected = expected.strip().replace('\r','').split('\n')
actual = actual.strip().replace('\r','').split('\n')
diff_line = 0
max_line_to_compare = min( len(expected), len(actual) )
for index in xrange(0,max_line_to_compare):
if expected[index].strip() != actual[index].strip():
diff_line = index + 1
break
if diff_line == 0 and len(expected) != len(actual):
diff_line = max_line_to_compare+1
if diff_line == 0:
return None
def safeGetLine( lines, index ):
index += -1
if index >= len(lines):
return ''
return lines[index].strip()
return """ Difference in %s at line %d:
Expected: '%s'
Actual: '%s'
""" % (message, diff_line,
safeGetLine(expected,diff_line),
safeGetLine(actual,diff_line) )
def safeReadFile( path ):
try:
return file( path, 'rt' ).read()
except IOError, e:
return '<File "%s" is missing: %s>' % (path,e)
def runAllTests( jsontest_executable_path, input_dir = None,
use_valgrind=False, with_json_checker=False ):
if not input_dir:
input_dir = os.path.join( os.getcwd(), 'data' )
tests = glob( os.path.join( input_dir, '*.json' ) )
if with_json_checker:
test_jsonchecker = glob( os.path.join( input_dir, '../jsonchecker', '*.json' ) )
else:
test_jsonchecker = []
failed_tests = []
valgrind_path = use_valgrind and VALGRIND_CMD or ''
for input_path in tests + test_jsonchecker:
expect_failure = os.path.basename( input_path ).startswith( 'fail' )
is_json_checker_test = (input_path in test_jsonchecker) or expect_failure
print 'TESTING:', input_path,
options = is_json_checker_test and '--json-checker' or ''
pipe = os.popen( "%s%s %s %s" % (
valgrind_path, jsontest_executable_path, options,
input_path) )
process_output = pipe.read()
status = pipe.close()
if is_json_checker_test:
if expect_failure:
if status is None:
print 'FAILED'
failed_tests.append( (input_path, 'Parsing should have failed:\n%s' %
safeReadFile(input_path)) )
else:
print 'OK'
else:
if status is not None:
print 'FAILED'
failed_tests.append( (input_path, 'Parsing failed:\n' + process_output) )
else:
print 'OK'
else:
base_path = os.path.splitext(input_path)[0]
actual_output = safeReadFile( base_path + '.actual' )
actual_rewrite_output = safeReadFile( base_path + '.actual-rewrite' )
file(base_path + '.process-output','wt').write( process_output )
if status:
print 'parsing failed'
failed_tests.append( (input_path, 'Parsing failed:\n' + process_output) )
else:
expected_output_path = os.path.splitext(input_path)[0] + '.expected'
expected_output = file( expected_output_path, 'rt' ).read()
detail = ( compareOutputs( expected_output, actual_output, 'input' )
or compareOutputs( expected_output, actual_rewrite_output, 'rewrite' ) )
if detail:
print 'FAILED'
failed_tests.append( (input_path, detail) )
else:
print 'OK'
if failed_tests:
print
print 'Failure details:'
for failed_test in failed_tests:
print '* Test', failed_test[0]
print failed_test[1]
print
print 'Test results: %d passed, %d failed.' % (len(tests)-len(failed_tests),
len(failed_tests) )
return 1
else:
print 'All %d tests passed.' % len(tests)
return 0
def main():
from optparse import OptionParser
parser = OptionParser( usage="%prog [options] <path to jsontestrunner.exe> [test case directory]" )
parser.add_option("--valgrind",
action="store_true", dest="valgrind", default=False,
help="run all the tests using valgrind to detect memory leaks")
parser.add_option("-c", "--with-json-checker",
action="store_true", dest="with_json_checker", default=False,
help="run all the tests from the official JSONChecker test suite of json.org")
parser.enable_interspersed_args()
options, args = parser.parse_args()
if len(args) < 1 or len(args) > 2:
parser.error( 'Must provide at least the path to the jsontestrunner executable.' )
sys.exit( 1 )
jsontest_executable_path = os.path.normpath( os.path.abspath( args[0] ) )
if len(args) > 1:
input_path = os.path.normpath( os.path.abspath( args[1] ) )
else:
input_path = None
status = runAllTests( jsontest_executable_path, input_path,
use_valgrind=options.valgrind, with_json_checker=options.with_json_checker )
sys.exit( status )
if __name__ == '__main__':
main()
|
hynnet/openwrt-mt7620 | refs/heads/master | staging_dir/host/lib/python2.7/multiprocessing/reduction.py | 217 | #
# Module to allow connection and socket objects to be transferred
# between processes
#
# multiprocessing/reduction.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
__all__ = []
import os
import sys
import socket
import threading
import _multiprocessing
from multiprocessing import current_process
from multiprocessing.forking import Popen, duplicate, close, ForkingPickler
from multiprocessing.util import register_after_fork, debug, sub_debug
from multiprocessing.connection import Client, Listener
#
#
#
if not(sys.platform == 'win32' or hasattr(_multiprocessing, 'recvfd')):
raise ImportError('pickling of connections not supported')
#
# Platform specific definitions
#
if sys.platform == 'win32':
import _subprocess
from _multiprocessing import win32
def send_handle(conn, handle, destination_pid):
process_handle = win32.OpenProcess(
win32.PROCESS_ALL_ACCESS, False, destination_pid
)
try:
new_handle = duplicate(handle, process_handle)
conn.send(new_handle)
finally:
close(process_handle)
def recv_handle(conn):
return conn.recv()
else:
def send_handle(conn, handle, destination_pid):
_multiprocessing.sendfd(conn.fileno(), handle)
def recv_handle(conn):
return _multiprocessing.recvfd(conn.fileno())
#
# Support for a per-process server thread which caches pickled handles
#
_cache = set()
def _reset(obj):
global _lock, _listener, _cache
for h in _cache:
close(h)
_cache.clear()
_lock = threading.Lock()
_listener = None
_reset(None)
register_after_fork(_reset, _reset)
def _get_listener():
global _listener
if _listener is None:
_lock.acquire()
try:
if _listener is None:
debug('starting listener and thread for sending handles')
_listener = Listener(authkey=current_process().authkey)
t = threading.Thread(target=_serve)
t.daemon = True
t.start()
finally:
_lock.release()
return _listener
def _serve():
from .util import is_exiting, sub_warning
while 1:
try:
conn = _listener.accept()
handle_wanted, destination_pid = conn.recv()
_cache.remove(handle_wanted)
send_handle(conn, handle_wanted, destination_pid)
close(handle_wanted)
conn.close()
except:
if not is_exiting():
import traceback
sub_warning(
'thread for sharing handles raised exception :\n' +
'-'*79 + '\n' + traceback.format_exc() + '-'*79
)
#
# Functions to be used for pickling/unpickling objects with handles
#
def reduce_handle(handle):
if Popen.thread_is_spawning():
return (None, Popen.duplicate_for_child(handle), True)
dup_handle = duplicate(handle)
_cache.add(dup_handle)
sub_debug('reducing handle %d', handle)
return (_get_listener().address, dup_handle, False)
def rebuild_handle(pickled_data):
address, handle, inherited = pickled_data
if inherited:
return handle
sub_debug('rebuilding handle %d', handle)
conn = Client(address, authkey=current_process().authkey)
conn.send((handle, os.getpid()))
new_handle = recv_handle(conn)
conn.close()
return new_handle
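# Sketch of the flow between two cooperating processes on one machine:
# sender: data = reduce_handle(fd) # dup the fd, cache it, return
# # (listener address, duplicated handle, inherited flag)
# receiver: fd = rebuild_handle(data) # connect back, receive the fd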
#
# Register `_multiprocessing.Connection` with `ForkingPickler`
#
def reduce_connection(conn):
rh = reduce_handle(conn.fileno())
return rebuild_connection, (rh, conn.readable, conn.writable)
def rebuild_connection(reduced_handle, readable, writable):
handle = rebuild_handle(reduced_handle)
return _multiprocessing.Connection(
handle, readable=readable, writable=writable
)
ForkingPickler.register(_multiprocessing.Connection, reduce_connection)
#
# Register `socket.socket` with `ForkingPickler`
#
def fromfd(fd, family, type_, proto=0):
s = socket.fromfd(fd, family, type_, proto)
if s.__class__ is not socket.socket:
s = socket.socket(_sock=s)
return s
def reduce_socket(s):
reduced_handle = reduce_handle(s.fileno())
return rebuild_socket, (reduced_handle, s.family, s.type, s.proto)
def rebuild_socket(reduced_handle, family, type_, proto):
fd = rebuild_handle(reduced_handle)
_sock = fromfd(fd, family, type_, proto)
close(fd)
return _sock
ForkingPickler.register(socket.socket, reduce_socket)
#
# Register `_multiprocessing.PipeConnection` with `ForkingPickler`
#
if sys.platform == 'win32':
def reduce_pipe_connection(conn):
rh = reduce_handle(conn.fileno())
return rebuild_pipe_connection, (rh, conn.readable, conn.writable)
def rebuild_pipe_connection(reduced_handle, readable, writable):
handle = rebuild_handle(reduced_handle)
return _multiprocessing.PipeConnection(
handle, readable=readable, writable=writable
)
ForkingPickler.register(_multiprocessing.PipeConnection, reduce_pipe_connection)
|
chafique-delli/OpenUpgrade | refs/heads/master | addons/website_certification/__openerp__.py | 64 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Certified People',
'category': 'Website',
'summary': 'Display your network of certified people on your website',
'version': '1.0',
'author': 'OpenERP S.A.',
'depends': ['marketing', 'website'],
'description': """
Display your network of certified people on your website
""",
'data': [
'security/ir.model.access.csv',
'views/website_certification_views.xml',
'views/website_certification_templates.xml',
],
'installable': True,
}
|